Dataset columns: prompt (string, lengths 162 to 4.26M) and response (string, lengths 109 to 5.16M).
Generate the Verilog code corresponding to the following Chisel files.

File DescribedSRAM.scala:

// See LICENSE.Berkeley for license details.
// See LICENSE.SiFive for license details.

package freechips.rocketchip.util

import chisel3.{Data, SyncReadMem, Vec}
import chisel3.util.log2Ceil

object DescribedSRAM {
  def apply[T <: Data](
    name: String,
    desc: String,
    size: BigInt, // depth
    data: T
  ): SyncReadMem[T] = {
    val mem = SyncReadMem(size, data)
    mem.suggestName(name)

    val granWidth = data match {
      case v: Vec[_] => v.head.getWidth
      case d => d.getWidth
    }

    val uid = 0

    Annotated.srams(
      component = mem,
      name = name,
      address_width = log2Ceil(size),
      data_width = data.getWidth,
      depth = size,
      description = desc,
      write_mask_granularity = granWidth
    )

    mem
  }
}
module dataArrayB1Way_1_0( // @[DescribedSRAM.scala:17:26]
  input  [7:0]  RW0_addr,
  input         RW0_en,
  input         RW0_clk,
  input         RW0_wmode,
  input  [63:0] RW0_wdata,
  output [63:0] RW0_rdata
);

  dataArrayB0Way_0_ext dataArrayB0Way_0_ext ( // @[DescribedSRAM.scala:17:26]
    .RW0_addr  (RW0_addr),
    .RW0_en    (RW0_en),
    .RW0_clk   (RW0_clk),
    .RW0_wmode (RW0_wmode),
    .RW0_wdata (RW0_wdata),
    .RW0_rdata (RW0_rdata)
  ); // @[DescribedSRAM.scala:17:26]
endmodule
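For context (not part of the dataset record), a minimal sketch of how DescribedSRAM might be instantiated from user code, assuming chisel3 and rocket-chip are on the classpath. The module name DataArraySketch and its port names are hypothetical; only the DescribedSRAM call itself follows the file above.

// Hypothetical sketch: a 256 x 64-bit array built with DescribedSRAM, sized to
// match the 8-bit RW0_addr and 64-bit RW0_wdata of the emitted module above.
import chisel3._
import freechips.rocketchip.util.DescribedSRAM

class DataArraySketch extends Module {
  val io = IO(new Bundle {
    val addr  = Input(UInt(8.W))
    val en    = Input(Bool())
    val write = Input(Bool())
    val wdata = Input(UInt(64.W))
    val rdata = Output(UInt(64.W))
  })

  // DescribedSRAM wraps a SyncReadMem, names it, and attaches the SRAM annotation.
  val mem = DescribedSRAM(
    name = "dataArraySketch",
    desc = "example 256 x 64-bit data array",
    size = 256,
    data = UInt(64.W)
  )

  // A synchronous read and a write that never fire in the same cycle can be
  // inferred as a single read/write port, like the RW0_* interface above.
  io.rdata := mem.read(io.addr, io.en && !io.write)
  when(io.en && io.write) {
    mem.write(io.addr, io.wdata)
  }
}

In the emitted Verilog above, the memory itself has been replaced by the dataArrayB0Way_0_ext macro module; the generated wrapper simply forwards the RW0_* port bundle to it.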
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File functional-unit.scala: //****************************************************************************** // Copyright (c) 2013 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Functional Units //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // // If regfile bypassing is disabled, then the functional unit must do its own // bypassing in here on the WB stage (i.e., bypassing the io.resp.data) // // TODO: explore possibility of conditional IO fields? if a branch unit... how to add extra to IO in subclass? package boom.v3.exu import chisel3._ import chisel3.util._ import chisel3.experimental.dataview._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ import freechips.rocketchip.tile import freechips.rocketchip.rocket.{PipelinedMultiplier,BP,BreakpointUnit,Causes,CSR} import boom.v3.common._ import boom.v3.ifu._ import boom.v3.util._ /**t * Functional unit constants */ object FUConstants { // bit mask, since a given execution pipeline may support multiple functional units val FUC_SZ = 10 val FU_X = BitPat.dontCare(FUC_SZ) val FU_ALU = 1.U(FUC_SZ.W) val FU_JMP = 2.U(FUC_SZ.W) val FU_MEM = 4.U(FUC_SZ.W) val FU_MUL = 8.U(FUC_SZ.W) val FU_DIV = 16.U(FUC_SZ.W) val FU_CSR = 32.U(FUC_SZ.W) val FU_FPU = 64.U(FUC_SZ.W) val FU_FDV = 128.U(FUC_SZ.W) val FU_I2F = 256.U(FUC_SZ.W) val FU_F2I = 512.U(FUC_SZ.W) // FP stores generate data through FP F2I, and generate address through MemAddrCalc val FU_F2IMEM = 516.U(FUC_SZ.W) } import FUConstants._ /** * Class to tell the FUDecoders what units it needs to support * * @param alu support alu unit? * @param bru support br unit? * @param mem support mem unit? * @param muld support multiple div unit? * @param fpu support FP unit? * @param csr support csr writing unit? * @param fdiv support FP div unit? * @param ifpu support int to FP unit? 
*/ class SupportedFuncUnits( val alu: Boolean = false, val jmp: Boolean = false, val mem: Boolean = false, val muld: Boolean = false, val fpu: Boolean = false, val csr: Boolean = false, val fdiv: Boolean = false, val ifpu: Boolean = false) { } /** * Bundle for signals sent to the functional unit * * @param dataWidth width of the data sent to the functional unit */ class FuncUnitReq(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle with HasBoomUOP { val numOperands = 3 val rs1_data = UInt(dataWidth.W) val rs2_data = UInt(dataWidth.W) val rs3_data = UInt(dataWidth.W) // only used for FMA units val pred_data = Bool() val kill = Bool() // kill everything } /** * Bundle for the signals sent out of the function unit * * @param dataWidth data sent from the functional unit */ class FuncUnitResp(val dataWidth: Int)(implicit p: Parameters) extends BoomBundle with HasBoomUOP { val predicated = Bool() // Was this response from a predicated-off instruction val data = UInt(dataWidth.W) val fflags = new ValidIO(new FFlagsResp) val addr = UInt((vaddrBits+1).W) // only for maddr -> LSU val mxcpt = new ValidIO(UInt((freechips.rocketchip.rocket.Causes.all.max+2).W)) //only for maddr->LSU val sfence = Valid(new freechips.rocketchip.rocket.SFenceReq) // only for mcalc } /** * Branch resolution information given from the branch unit */ class BrResolutionInfo(implicit p: Parameters) extends BoomBundle { val uop = new MicroOp val valid = Bool() val mispredict = Bool() val taken = Bool() // which direction did the branch go? val cfi_type = UInt(CFI_SZ.W) // Info for recalculating the pc for this branch val pc_sel = UInt(2.W) val jalr_target = UInt(vaddrBitsExtended.W) val target_offset = SInt() } class BrUpdateInfo(implicit p: Parameters) extends BoomBundle { // On the first cycle we get masks to kill registers val b1 = new BrUpdateMasks // On the second cycle we get indices to reset pointers val b2 = new BrResolutionInfo } class BrUpdateMasks(implicit p: Parameters) extends BoomBundle { val resolve_mask = UInt(maxBrCount.W) val mispredict_mask = UInt(maxBrCount.W) } /** * Abstract top level functional unit class that wraps a lower level hand made functional unit * * @param isPipelined is the functional unit pipelined? * @param numStages how many pipeline stages does the functional unit have * @param numBypassStages how many bypass stages does the function unit have * @param dataWidth width of the data being operated on in the functional unit * @param hasBranchUnit does this functional unit have a branch unit? 
*/ abstract class FunctionalUnit( val isPipelined: Boolean, val numStages: Int, val numBypassStages: Int, val dataWidth: Int, val isJmpUnit: Boolean = false, val isAluUnit: Boolean = false, val isMemAddrCalcUnit: Boolean = false, val needsFcsr: Boolean = false) (implicit p: Parameters) extends BoomModule { val io = IO(new Bundle { val req = Flipped(new DecoupledIO(new FuncUnitReq(dataWidth))) val resp = (new DecoupledIO(new FuncUnitResp(dataWidth))) val brupdate = Input(new BrUpdateInfo()) val bypass = Output(Vec(numBypassStages, Valid(new ExeUnitResp(dataWidth)))) // only used by the fpu unit val fcsr_rm = if (needsFcsr) Input(UInt(tile.FPConstants.RM_SZ.W)) else null // only used by branch unit val brinfo = if (isAluUnit) Output(new BrResolutionInfo()) else null val get_ftq_pc = if (isJmpUnit) Flipped(new GetPCFromFtqIO()) else null val status = if (isMemAddrCalcUnit) Input(new freechips.rocketchip.rocket.MStatus()) else null // only used by memaddr calc unit val bp = if (isMemAddrCalcUnit) Input(Vec(nBreakpoints, new BP)) else null val mcontext = if (isMemAddrCalcUnit) Input(UInt(coreParams.mcontextWidth.W)) else null val scontext = if (isMemAddrCalcUnit) Input(UInt(coreParams.scontextWidth.W)) else null }) io.bypass.foreach { b => b.valid := false.B; b.bits := DontCare } io.resp.valid := false.B io.resp.bits := DontCare if (isJmpUnit) { io.get_ftq_pc.ftq_idx := DontCare } } /** * Abstract top level pipelined functional unit * * Note: this helps track which uops get killed while in intermediate stages, * but it is the job of the consumer to check for kills on the same cycle as consumption!!! * * @param numStages how many pipeline stages does the functional unit have * @param numBypassStages how many bypass stages does the function unit have * @param earliestBypassStage first stage that you can start bypassing from * @param dataWidth width of the data being operated on in the functional unit * @param hasBranchUnit does this functional unit have a branch unit? */ abstract class PipelinedFunctionalUnit( numStages: Int, numBypassStages: Int, earliestBypassStage: Int, dataWidth: Int, isJmpUnit: Boolean = false, isAluUnit: Boolean = false, isMemAddrCalcUnit: Boolean = false, needsFcsr: Boolean = false )(implicit p: Parameters) extends FunctionalUnit( isPipelined = true, numStages = numStages, numBypassStages = numBypassStages, dataWidth = dataWidth, isJmpUnit = isJmpUnit, isAluUnit = isAluUnit, isMemAddrCalcUnit = isMemAddrCalcUnit, needsFcsr = needsFcsr) { // Pipelined functional unit is always ready. 
io.req.ready := true.B if (numStages > 0) { val r_valids = RegInit(VecInit(Seq.fill(numStages) { false.B })) val r_uops = Reg(Vec(numStages, new MicroOp())) // handle incoming request r_valids(0) := io.req.valid && !IsKilledByBranch(io.brupdate, io.req.bits.uop) && !io.req.bits.kill r_uops(0) := io.req.bits.uop r_uops(0).br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop) // handle middle of the pipeline for (i <- 1 until numStages) { r_valids(i) := r_valids(i-1) && !IsKilledByBranch(io.brupdate, r_uops(i-1)) && !io.req.bits.kill r_uops(i) := r_uops(i-1) r_uops(i).br_mask := GetNewBrMask(io.brupdate, r_uops(i-1)) if (numBypassStages > 0) { io.bypass(i-1).bits.uop := r_uops(i-1) } } // handle outgoing (branch could still kill it) // consumer must also check for pipeline flushes (kills) io.resp.valid := r_valids(numStages-1) && !IsKilledByBranch(io.brupdate, r_uops(numStages-1)) io.resp.bits.predicated := false.B io.resp.bits.uop := r_uops(numStages-1) io.resp.bits.uop.br_mask := GetNewBrMask(io.brupdate, r_uops(numStages-1)) // bypassing (TODO allow bypass vector to have a different size from numStages) if (numBypassStages > 0 && earliestBypassStage == 0) { io.bypass(0).bits.uop := io.req.bits.uop for (i <- 1 until numBypassStages) { io.bypass(i).bits.uop := r_uops(i-1) } } } else { require (numStages == 0) // pass req straight through to response // valid doesn't check kill signals, let consumer deal with it. // The LSU already handles it and this hurts critical path. io.resp.valid := io.req.valid && !IsKilledByBranch(io.brupdate, io.req.bits.uop) io.resp.bits.predicated := false.B io.resp.bits.uop := io.req.bits.uop io.resp.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop) } } /** * Functional unit that wraps RocketChips ALU * * @param isBranchUnit is this a branch unit? * @param numStages how many pipeline stages does the functional unit have * @param dataWidth width of the data being operated on in the functional unit */ class ALUUnit(isJmpUnit: Boolean = false, numStages: Int = 1, dataWidth: Int)(implicit p: Parameters) extends PipelinedFunctionalUnit( numStages = numStages, numBypassStages = numStages, isAluUnit = true, earliestBypassStage = 0, dataWidth = dataWidth, isJmpUnit = isJmpUnit) with boom.v3.ifu.HasBoomFrontendParameters { val uop = io.req.bits.uop // immediate generation val imm_xprlen = ImmGen(uop.imm_packed, uop.ctrl.imm_sel) // operand 1 select var op1_data: UInt = null if (isJmpUnit) { // Get the uop PC for jumps val block_pc = AlignPCToBoundary(io.get_ftq_pc.pc, icBlockBytes) val uop_pc = (block_pc | uop.pc_lob) - Mux(uop.edge_inst, 2.U, 0.U) op1_data = Mux(uop.ctrl.op1_sel.asUInt === OP1_RS1 , io.req.bits.rs1_data, Mux(uop.ctrl.op1_sel.asUInt === OP1_PC , Sext(uop_pc, xLen), 0.U)) } else { op1_data = Mux(uop.ctrl.op1_sel.asUInt === OP1_RS1 , io.req.bits.rs1_data, 0.U) } // operand 2 select val op2_data = Mux(uop.ctrl.op2_sel === OP2_IMM, Sext(imm_xprlen.asUInt, xLen), Mux(uop.ctrl.op2_sel === OP2_IMMC, io.req.bits.uop.prs1(4,0), Mux(uop.ctrl.op2_sel === OP2_RS2 , io.req.bits.rs2_data, Mux(uop.ctrl.op2_sel === OP2_NEXT, Mux(uop.is_rvc, 2.U, 4.U), 0.U)))) val alu = Module(new freechips.rocketchip.rocket.ALU()) alu.io.in1 := op1_data.asUInt alu.io.in2 := op2_data.asUInt alu.io.fn := uop.ctrl.op_fcn alu.io.dw := uop.ctrl.fcn_dw // Did I just get killed by the previous cycle's branch, // or by a flush pipeline? 
val killed = WireInit(false.B) when (io.req.bits.kill || IsKilledByBranch(io.brupdate, uop)) { killed := true.B } val rs1 = io.req.bits.rs1_data val rs2 = io.req.bits.rs2_data val br_eq = (rs1 === rs2) val br_ltu = (rs1.asUInt < rs2.asUInt) val br_lt = (~(rs1(xLen-1) ^ rs2(xLen-1)) & br_ltu | rs1(xLen-1) & ~rs2(xLen-1)).asBool val pc_sel = MuxLookup(uop.ctrl.br_type, PC_PLUS4)( Seq( BR_N -> PC_PLUS4, BR_NE -> Mux(!br_eq, PC_BRJMP, PC_PLUS4), BR_EQ -> Mux( br_eq, PC_BRJMP, PC_PLUS4), BR_GE -> Mux(!br_lt, PC_BRJMP, PC_PLUS4), BR_GEU -> Mux(!br_ltu, PC_BRJMP, PC_PLUS4), BR_LT -> Mux( br_lt, PC_BRJMP, PC_PLUS4), BR_LTU -> Mux( br_ltu, PC_BRJMP, PC_PLUS4), BR_J -> PC_BRJMP, BR_JR -> PC_JALR )) val is_taken = io.req.valid && !killed && (uop.is_br || uop.is_jalr || uop.is_jal) && (pc_sel =/= PC_PLUS4) // "mispredict" means that a branch has been resolved and it must be killed val mispredict = WireInit(false.B) val is_br = io.req.valid && !killed && uop.is_br && !uop.is_sfb val is_jal = io.req.valid && !killed && uop.is_jal val is_jalr = io.req.valid && !killed && uop.is_jalr when (is_br || is_jalr) { if (!isJmpUnit) { assert (pc_sel =/= PC_JALR) } when (pc_sel === PC_PLUS4) { mispredict := uop.taken } when (pc_sel === PC_BRJMP) { mispredict := !uop.taken } } val brinfo = Wire(new BrResolutionInfo) // note: jal doesn't allocate a branch-mask, so don't clear a br-mask bit brinfo.valid := is_br || is_jalr brinfo.mispredict := mispredict brinfo.uop := uop brinfo.cfi_type := Mux(is_jalr, CFI_JALR, Mux(is_br , CFI_BR, CFI_X)) brinfo.taken := is_taken brinfo.pc_sel := pc_sel brinfo.jalr_target := DontCare // Branch/Jump Target Calculation // For jumps we read the FTQ, and can calculate the target // For branches we emit the offset for the core to redirect if necessary val target_offset = imm_xprlen(20,0).asSInt brinfo.jalr_target := DontCare if (isJmpUnit) { def encodeVirtualAddress(a0: UInt, ea: UInt) = if (vaddrBitsExtended == vaddrBits) { ea } else { // Efficient means to compress 64-bit VA into vaddrBits+1 bits. // (VA is bad if VA(vaddrBits) != VA(vaddrBits-1)). 
val a = a0.asSInt >> vaddrBits val msb = Mux(a === 0.S || a === -1.S, ea(vaddrBits), !ea(vaddrBits-1)) Cat(msb, ea(vaddrBits-1,0)) } val jalr_target_base = io.req.bits.rs1_data.asSInt val jalr_target_xlen = Wire(UInt(xLen.W)) jalr_target_xlen := (jalr_target_base + target_offset).asUInt val jalr_target = (encodeVirtualAddress(jalr_target_xlen, jalr_target_xlen).asSInt & -2.S).asUInt brinfo.jalr_target := jalr_target val cfi_idx = ((uop.pc_lob ^ Mux(io.get_ftq_pc.entry.start_bank === 1.U, 1.U << log2Ceil(bankBytes), 0.U)))(log2Ceil(fetchWidth),1) when (pc_sel === PC_JALR) { mispredict := !io.get_ftq_pc.next_val || (io.get_ftq_pc.next_pc =/= jalr_target) || !io.get_ftq_pc.entry.cfi_idx.valid || (io.get_ftq_pc.entry.cfi_idx.bits =/= cfi_idx) } } brinfo.target_offset := target_offset io.brinfo := brinfo // Response // TODO add clock gate on resp bits from functional units // io.resp.bits.data := RegEnable(alu.io.out, io.req.valid) // val reg_data = Reg(outType = Bits(width = xLen)) // reg_data := alu.io.out // io.resp.bits.data := reg_data val r_val = RegInit(VecInit(Seq.fill(numStages) { false.B })) val r_data = Reg(Vec(numStages, UInt(xLen.W))) val r_pred = Reg(Vec(numStages, Bool())) val alu_out = Mux(io.req.bits.uop.is_sfb_shadow && io.req.bits.pred_data, Mux(io.req.bits.uop.ldst_is_rs1, io.req.bits.rs1_data, io.req.bits.rs2_data), Mux(io.req.bits.uop.uopc === uopMOV, io.req.bits.rs2_data, alu.io.out)) r_val (0) := io.req.valid r_data(0) := Mux(io.req.bits.uop.is_sfb_br, pc_sel === PC_BRJMP, alu_out) r_pred(0) := io.req.bits.uop.is_sfb_shadow && io.req.bits.pred_data for (i <- 1 until numStages) { r_val(i) := r_val(i-1) r_data(i) := r_data(i-1) r_pred(i) := r_pred(i-1) } io.resp.bits.data := r_data(numStages-1) io.resp.bits.predicated := r_pred(numStages-1) // Bypass // for the ALU, we can bypass same cycle as compute require (numStages >= 1) require (numBypassStages >= 1) io.bypass(0).valid := io.req.valid io.bypass(0).bits.data := Mux(io.req.bits.uop.is_sfb_br, pc_sel === PC_BRJMP, alu_out) for (i <- 1 until numStages) { io.bypass(i).valid := r_val(i-1) io.bypass(i).bits.data := r_data(i-1) } // Exceptions io.resp.bits.fflags.valid := false.B } /** * Functional unit that passes in base+imm to calculate addresses, and passes store data * to the LSU. * For floating point, 65bit FP store-data needs to be decoded into 64bit FP form */ class MemAddrCalcUnit(implicit p: Parameters) extends PipelinedFunctionalUnit( numStages = 0, numBypassStages = 0, earliestBypassStage = 0, dataWidth = 65, // TODO enable this only if FP is enabled? 
isMemAddrCalcUnit = true) with freechips.rocketchip.rocket.constants.MemoryOpConstants with freechips.rocketchip.rocket.constants.ScalarOpConstants { // perform address calculation val sum = (io.req.bits.rs1_data.asSInt + io.req.bits.uop.imm_packed(19,8).asSInt).asUInt val ea_sign = Mux(sum(vaddrBits-1), ~sum(63,vaddrBits) === 0.U, sum(63,vaddrBits) =/= 0.U) val effective_address = Cat(ea_sign, sum(vaddrBits-1,0)).asUInt val store_data = io.req.bits.rs2_data io.resp.bits.addr := effective_address io.resp.bits.data := store_data if (dataWidth > 63) { assert (!(io.req.valid && io.req.bits.uop.ctrl.is_std && io.resp.bits.data(64).asBool === true.B), "65th bit set in MemAddrCalcUnit.") assert (!(io.req.valid && io.req.bits.uop.ctrl.is_std && io.req.bits.uop.fp_val), "FP store-data should now be going through a different unit.") } assert (!(io.req.bits.uop.fp_val && io.req.valid && io.req.bits.uop.uopc =/= uopLD && io.req.bits.uop.uopc =/= uopSTA), "[maddrcalc] assert we never get store data in here.") // Handle misaligned exceptions val size = io.req.bits.uop.mem_size val misaligned = (size === 1.U && (effective_address(0) =/= 0.U)) || (size === 2.U && (effective_address(1,0) =/= 0.U)) || (size === 3.U && (effective_address(2,0) =/= 0.U)) val bkptu = Module(new BreakpointUnit(nBreakpoints)) bkptu.io.status := io.status bkptu.io.bp := io.bp bkptu.io.pc := DontCare bkptu.io.ea := effective_address bkptu.io.mcontext := io.mcontext bkptu.io.scontext := io.scontext val ma_ld = io.req.valid && io.req.bits.uop.uopc === uopLD && misaligned val ma_st = io.req.valid && (io.req.bits.uop.uopc === uopSTA || io.req.bits.uop.uopc === uopAMO_AG) && misaligned val dbg_bp = io.req.valid && ((io.req.bits.uop.uopc === uopLD && bkptu.io.debug_ld) || (io.req.bits.uop.uopc === uopSTA && bkptu.io.debug_st)) val bp = io.req.valid && ((io.req.bits.uop.uopc === uopLD && bkptu.io.xcpt_ld) || (io.req.bits.uop.uopc === uopSTA && bkptu.io.xcpt_st)) def checkExceptions(x: Seq[(Bool, UInt)]) = (x.map(_._1).reduce(_||_), PriorityMux(x)) val (xcpt_val, xcpt_cause) = checkExceptions(List( (ma_ld, (Causes.misaligned_load).U), (ma_st, (Causes.misaligned_store).U), (dbg_bp, (CSR.debugTriggerCause).U), (bp, (Causes.breakpoint).U))) io.resp.bits.mxcpt.valid := xcpt_val io.resp.bits.mxcpt.bits := xcpt_cause assert (!(ma_ld && ma_st), "Mutually-exclusive exceptions are firing.") io.resp.bits.sfence.valid := io.req.valid && io.req.bits.uop.mem_cmd === M_SFENCE io.resp.bits.sfence.bits.rs1 := io.req.bits.uop.mem_size(0) io.resp.bits.sfence.bits.rs2 := io.req.bits.uop.mem_size(1) io.resp.bits.sfence.bits.addr := io.req.bits.rs1_data io.resp.bits.sfence.bits.asid := io.req.bits.rs2_data } /** * Functional unit to wrap lower level FPU * * Currently, bypassing is unsupported! * All FP instructions are padded out to the max latency unit for easy * write-port scheduling. 
*/ class FPUUnit(implicit p: Parameters) extends PipelinedFunctionalUnit( numStages = p(tile.TileKey).core.fpu.get.dfmaLatency, numBypassStages = 0, earliestBypassStage = 0, dataWidth = 65, needsFcsr = true) { val fpu = Module(new FPU()) fpu.io.req.valid := io.req.valid fpu.io.req.bits.uop := io.req.bits.uop fpu.io.req.bits.rs1_data := io.req.bits.rs1_data fpu.io.req.bits.rs2_data := io.req.bits.rs2_data fpu.io.req.bits.rs3_data := io.req.bits.rs3_data fpu.io.req.bits.fcsr_rm := io.fcsr_rm io.resp.bits.data := fpu.io.resp.bits.data io.resp.bits.fflags.valid := fpu.io.resp.bits.fflags.valid io.resp.bits.fflags.bits.uop := io.resp.bits.uop io.resp.bits.fflags.bits.flags := fpu.io.resp.bits.fflags.bits.flags // kill me now } /** * Int to FP conversion functional unit * * @param latency the amount of stages to delay by */ class IntToFPUnit(latency: Int)(implicit p: Parameters) extends PipelinedFunctionalUnit( numStages = latency, numBypassStages = 0, earliestBypassStage = 0, dataWidth = 65, needsFcsr = true) with tile.HasFPUParameters { val fp_decoder = Module(new UOPCodeFPUDecoder) // TODO use a simpler decoder val io_req = io.req.bits fp_decoder.io.uopc := io_req.uop.uopc val fp_ctrl = fp_decoder.io.sigs val fp_rm = Mux(ImmGenRm(io_req.uop.imm_packed) === 7.U, io.fcsr_rm, ImmGenRm(io_req.uop.imm_packed)) val req = Wire(new tile.FPInput) val tag = fp_ctrl.typeTagIn req.viewAsSupertype(new tile.FPUCtrlSigs) := fp_ctrl req.rm := fp_rm req.in1 := unbox(io_req.rs1_data, tag, None) req.in2 := unbox(io_req.rs2_data, tag, None) req.in3 := DontCare req.typ := ImmGenTyp(io_req.uop.imm_packed) req.fmt := DontCare // FIXME: this may not be the right thing to do here req.fmaCmd := DontCare assert (!(io.req.valid && fp_ctrl.fromint && req.in1(xLen).asBool), "[func] IntToFP integer input has 65th high-order bit set!") assert (!(io.req.valid && !fp_ctrl.fromint), "[func] Only support fromInt micro-ops.") val ifpu = Module(new tile.IntToFP(intToFpLatency)) ifpu.io.in.valid := io.req.valid ifpu.io.in.bits := req ifpu.io.in.bits.in1 := io_req.rs1_data val out_double = Pipe(io.req.valid, fp_ctrl.typeTagOut === D, intToFpLatency).bits //io.resp.bits.data := box(ifpu.io.out.bits.data, !io.resp.bits.uop.fp_single) io.resp.bits.data := box(ifpu.io.out.bits.data, out_double) io.resp.bits.fflags.valid := ifpu.io.out.valid io.resp.bits.fflags.bits.uop := io.resp.bits.uop io.resp.bits.fflags.bits.flags := ifpu.io.out.bits.exc } /** * Iterative/unpipelined functional unit, can only hold a single MicroOp at a time * assumes at least one register between request and response * * TODO allow up to N micro-ops simultaneously. * * @param dataWidth width of the data to be passed into the functional unit */ abstract class IterativeFunctionalUnit(dataWidth: Int)(implicit p: Parameters) extends FunctionalUnit( isPipelined = false, numStages = 1, numBypassStages = 0, dataWidth = dataWidth) { val r_uop = Reg(new MicroOp()) val do_kill = Wire(Bool()) do_kill := io.req.bits.kill // irrelevant default when (io.req.fire) { // update incoming uop do_kill := IsKilledByBranch(io.brupdate, io.req.bits.uop) || io.req.bits.kill r_uop := io.req.bits.uop r_uop.br_mask := GetNewBrMask(io.brupdate, io.req.bits.uop) } .otherwise { do_kill := IsKilledByBranch(io.brupdate, r_uop) || io.req.bits.kill r_uop.br_mask := GetNewBrMask(io.brupdate, r_uop) } // assumes at least one pipeline register between request and response io.resp.bits.uop := r_uop } /** * Divide functional unit. 
* * @param dataWidth data to be passed into the functional unit */ class DivUnit(dataWidth: Int)(implicit p: Parameters) extends IterativeFunctionalUnit(dataWidth) { // We don't use the iterative multiply functionality here. // Instead we use the PipelinedMultiplier val div = Module(new freechips.rocketchip.rocket.MulDiv(mulDivParams, width = dataWidth)) // request div.io.req.valid := io.req.valid && !this.do_kill div.io.req.bits.dw := io.req.bits.uop.ctrl.fcn_dw div.io.req.bits.fn := io.req.bits.uop.ctrl.op_fcn div.io.req.bits.in1 := io.req.bits.rs1_data div.io.req.bits.in2 := io.req.bits.rs2_data div.io.req.bits.tag := DontCare io.req.ready := div.io.req.ready // handle pipeline kills and branch misspeculations div.io.kill := this.do_kill // response io.resp.valid := div.io.resp.valid && !this.do_kill div.io.resp.ready := io.resp.ready io.resp.bits.data := div.io.resp.bits.data } /** * Pipelined multiplier functional unit that wraps around the RocketChip pipelined multiplier * * @param numStages number of pipeline stages * @param dataWidth size of the data being passed into the functional unit */ class PipelinedMulUnit(numStages: Int, dataWidth: Int)(implicit p: Parameters) extends PipelinedFunctionalUnit( numStages = numStages, numBypassStages = 0, earliestBypassStage = 0, dataWidth = dataWidth) { val imul = Module(new PipelinedMultiplier(xLen, numStages)) // request imul.io.req.valid := io.req.valid imul.io.req.bits.fn := io.req.bits.uop.ctrl.op_fcn imul.io.req.bits.dw := io.req.bits.uop.ctrl.fcn_dw imul.io.req.bits.in1 := io.req.bits.rs1_data imul.io.req.bits.in2 := io.req.bits.rs2_data imul.io.req.bits.tag := DontCare // response io.resp.bits.data := imul.io.resp.bits.data } File micro-op.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // MicroOp //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.exu.FUConstants /** * Extension to BoomBundle to add a MicroOp */ abstract trait HasBoomUOP extends BoomBundle { val uop = new MicroOp() } /** * MicroOp passing through the pipeline */ class MicroOp(implicit p: Parameters) extends BoomBundle with freechips.rocketchip.rocket.constants.MemoryOpConstants with freechips.rocketchip.rocket.constants.ScalarOpConstants { val uopc = UInt(UOPC_SZ.W) // micro-op code val inst = UInt(32.W) val debug_inst = UInt(32.W) val is_rvc = Bool() val debug_pc = UInt(coreMaxAddrBits.W) val iq_type = UInt(IQT_SZ.W) // which issue unit do we use? val fu_code = UInt(FUConstants.FUC_SZ.W) // which functional unit do we use? val ctrl = new CtrlSignals // What is the next state of this uop in the issue window? useful // for the compacting queue. val iw_state = UInt(2.W) // Has operand 1 or 2 been waken speculatively by a load? // Only integer operands are speculaively woken up, // so we can ignore p3. 
val iw_p1_poisoned = Bool() val iw_p2_poisoned = Bool() val is_br = Bool() // is this micro-op a (branch) vs a regular PC+4 inst? val is_jalr = Bool() // is this a jump? (jal or jalr) val is_jal = Bool() // is this a JAL (doesn't include JR)? used for branch unit val is_sfb = Bool() // is this a sfb or in the shadow of a sfb val br_mask = UInt(maxBrCount.W) // which branches are we being speculated under? val br_tag = UInt(brTagSz.W) // Index into FTQ to figure out our fetch PC. val ftq_idx = UInt(log2Ceil(ftqSz).W) // This inst straddles two fetch packets val edge_inst = Bool() // Low-order bits of our own PC. Combine with ftq[ftq_idx] to get PC. // Aligned to a cache-line size, as that is the greater fetch granularity. // TODO: Shouldn't this be aligned to fetch-width size? val pc_lob = UInt(log2Ceil(icBlockBytes).W) // Was this a branch that was predicted taken? val taken = Bool() val imm_packed = UInt(LONGEST_IMM_SZ.W) // densely pack the imm in decode... // then translate and sign-extend in execute val csr_addr = UInt(CSR_ADDR_SZ.W) // only used for critical path reasons in Exe val rob_idx = UInt(robAddrSz.W) val ldq_idx = UInt(ldqAddrSz.W) val stq_idx = UInt(stqAddrSz.W) val rxq_idx = UInt(log2Ceil(numRxqEntries).W) val pdst = UInt(maxPregSz.W) val prs1 = UInt(maxPregSz.W) val prs2 = UInt(maxPregSz.W) val prs3 = UInt(maxPregSz.W) val ppred = UInt(log2Ceil(ftqSz).W) val prs1_busy = Bool() val prs2_busy = Bool() val prs3_busy = Bool() val ppred_busy = Bool() val stale_pdst = UInt(maxPregSz.W) val exception = Bool() val exc_cause = UInt(xLen.W) // TODO compress this down, xlen is insanity val bypassable = Bool() // can we bypass ALU results? (doesn't include loads, csr, etc...) val mem_cmd = UInt(M_SZ.W) // sync primitives/cache flushes val mem_size = UInt(2.W) val mem_signed = Bool() val is_fence = Bool() val is_fencei = Bool() val is_amo = Bool() val uses_ldq = Bool() val uses_stq = Bool() val is_sys_pc2epc = Bool() // Is a ECall or Breakpoint -- both set EPC to PC. val is_unique = Bool() // only allow this instruction in the pipeline, wait for STQ to // drain, clear fetcha fter it (tell ROB to un-ready until empty) val flush_on_commit = Bool() // some instructions need to flush the pipeline behind them // Preditation def is_sfb_br = is_br && is_sfb && enableSFBOpt.B // Does this write a predicate def is_sfb_shadow = !is_br && is_sfb && enableSFBOpt.B // Is this predicated val ldst_is_rs1 = Bool() // If this is set and we are predicated off, copy rs1 to dst, // else copy rs2 to dst // logical specifiers (only used in Decode->Rename), except rollback (ldst) val ldst = UInt(lregSz.W) val lrs1 = UInt(lregSz.W) val lrs2 = UInt(lregSz.W) val lrs3 = UInt(lregSz.W) val ldst_val = Bool() // is there a destination? invalid for stores, rd==x0, etc. val dst_rtype = UInt(2.W) val lrs1_rtype = UInt(2.W) val lrs2_rtype = UInt(2.W) val frs3_en = Bool() // floating point information val fp_val = Bool() // is a floating-point instruction (F- or D-extension)? // If it's non-ld/st it will write back exception bits to the fcsr. val fp_single = Bool() // single-precision floating point instruction (F-extension) // frontend exception information val xcpt_pf_if = Bool() // I-TLB page fault. val xcpt_ae_if = Bool() // I$ access exception. val xcpt_ma_if = Bool() // Misaligned fetch (jal/brjumping to misaligned addr). 
val bp_debug_if = Bool() // Breakpoint val bp_xcpt_if = Bool() // Breakpoint // What prediction structure provides the prediction FROM this op val debug_fsrc = UInt(BSRC_SZ.W) // What prediction structure provides the prediction TO this op val debug_tsrc = UInt(BSRC_SZ.W) // Do we allocate a branch tag for this? // SFB branches don't get a mask, they get a predicate bit def allocate_brtag = (is_br && !is_sfb) || is_jalr // Does this register write-back def rf_wen = dst_rtype =/= RT_X // Is it possible for this uop to misspeculate, preventing the commit of subsequent uops? def unsafe = uses_ldq || (uses_stq && !is_fence) || is_br || is_jalr def fu_code_is(_fu: UInt) = (fu_code & _fu) =/= 0.U } /** * Control signals within a MicroOp * * TODO REFACTOR this, as this should no longer be true, as bypass occurs in stage before branch resolution */ class CtrlSignals extends Bundle() { val br_type = UInt(BR_N.getWidth.W) val op1_sel = UInt(OP1_X.getWidth.W) val op2_sel = UInt(OP2_X.getWidth.W) val imm_sel = UInt(IS_X.getWidth.W) val op_fcn = UInt(freechips.rocketchip.rocket.ALU.SZ_ALU_FN.W) val fcn_dw = Bool() val csr_cmd = UInt(freechips.rocketchip.rocket.CSR.SZ.W) val is_load = Bool() // will invoke TLB address lookup val is_sta = Bool() // will invoke TLB address lookup val is_std = Bool() }
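Before the generated ALUUnit Verilog below, a brief aside on the branch-kill idiom used throughout util.scala and functional-unit.scala (maskMatch/IsKilledByBranch to detect kills, GetNewBrMask to prune resolved branches). The following is a minimal, self-contained sketch assuming only chisel3; SimpleBrUpdate, SimpleUop, and the 8-bit mask width are hypothetical stand-ins for BOOM's BrUpdateInfo and MicroOp, not the actual BOOM types.

// Hypothetical sketch of a single pipeline stage that drops uops killed by a
// branch mispredict and prunes their branch masks as branches resolve.
import chisel3._
import chisel3.util._

class SimpleBrUpdate extends Bundle {
  val resolve_mask    = UInt(8.W) // branches resolved this cycle
  val mispredict_mask = UInt(8.W) // resolved branches that mispredicted
}

class SimpleUop extends Bundle {
  val br_mask = UInt(8.W)         // branches this uop is still speculated under
  val data    = UInt(64.W)
}

class KillableStage extends Module {
  val io = IO(new Bundle {
    val in       = Flipped(Valid(new SimpleUop))
    val brupdate = Input(new SimpleBrUpdate)
    val out      = Valid(new SimpleUop)
  })

  // maskMatch-style check: killed if any pending branch just mispredicted.
  def isKilled(brMask: UInt): Bool = (brMask & io.brupdate.mispredict_mask) =/= 0.U

  val r_valid = RegInit(false.B)
  val r_uop   = Reg(new SimpleUop)

  r_valid := io.in.valid && !isKilled(io.in.bits.br_mask)
  r_uop   := io.in.bits
  // GetNewBrMask-style update: clear bits for branches that resolved this cycle.
  r_uop.br_mask := io.in.bits.br_mask & ~io.brupdate.resolve_mask

  // The consumer must still check for a kill on the same cycle it consumes.
  io.out.valid := r_valid && !isKilled(r_uop.br_mask)
  io.out.bits  := r_uop
}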
module ALUUnit( // @[functional-unit.scala:290:7] input clock, // @[functional-unit.scala:290:7] input reset, // @[functional-unit.scala:290:7] input io_req_valid, // @[functional-unit.scala:168:14] input [6:0] io_req_bits_uop_uopc, // @[functional-unit.scala:168:14] input [31:0] io_req_bits_uop_inst, // @[functional-unit.scala:168:14] input [31:0] io_req_bits_uop_debug_inst, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_rvc, // @[functional-unit.scala:168:14] input [39:0] io_req_bits_uop_debug_pc, // @[functional-unit.scala:168:14] input [2:0] io_req_bits_uop_iq_type, // @[functional-unit.scala:168:14] input [9:0] io_req_bits_uop_fu_code, // @[functional-unit.scala:168:14] input [3:0] io_req_bits_uop_ctrl_br_type, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_ctrl_op1_sel, // @[functional-unit.scala:168:14] input [2:0] io_req_bits_uop_ctrl_op2_sel, // @[functional-unit.scala:168:14] input [2:0] io_req_bits_uop_ctrl_imm_sel, // @[functional-unit.scala:168:14] input [4:0] io_req_bits_uop_ctrl_op_fcn, // @[functional-unit.scala:168:14] input io_req_bits_uop_ctrl_fcn_dw, // @[functional-unit.scala:168:14] input [2:0] io_req_bits_uop_ctrl_csr_cmd, // @[functional-unit.scala:168:14] input io_req_bits_uop_ctrl_is_load, // @[functional-unit.scala:168:14] input io_req_bits_uop_ctrl_is_sta, // @[functional-unit.scala:168:14] input io_req_bits_uop_ctrl_is_std, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_iw_state, // @[functional-unit.scala:168:14] input io_req_bits_uop_iw_p1_poisoned, // @[functional-unit.scala:168:14] input io_req_bits_uop_iw_p2_poisoned, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_br, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_jalr, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_jal, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_sfb, // @[functional-unit.scala:168:14] input [15:0] io_req_bits_uop_br_mask, // @[functional-unit.scala:168:14] input [3:0] io_req_bits_uop_br_tag, // @[functional-unit.scala:168:14] input [4:0] io_req_bits_uop_ftq_idx, // @[functional-unit.scala:168:14] input io_req_bits_uop_edge_inst, // @[functional-unit.scala:168:14] input [5:0] io_req_bits_uop_pc_lob, // @[functional-unit.scala:168:14] input io_req_bits_uop_taken, // @[functional-unit.scala:168:14] input [19:0] io_req_bits_uop_imm_packed, // @[functional-unit.scala:168:14] input [11:0] io_req_bits_uop_csr_addr, // @[functional-unit.scala:168:14] input [6:0] io_req_bits_uop_rob_idx, // @[functional-unit.scala:168:14] input [4:0] io_req_bits_uop_ldq_idx, // @[functional-unit.scala:168:14] input [4:0] io_req_bits_uop_stq_idx, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_rxq_idx, // @[functional-unit.scala:168:14] input [6:0] io_req_bits_uop_pdst, // @[functional-unit.scala:168:14] input [6:0] io_req_bits_uop_prs1, // @[functional-unit.scala:168:14] input [6:0] io_req_bits_uop_prs2, // @[functional-unit.scala:168:14] input [6:0] io_req_bits_uop_prs3, // @[functional-unit.scala:168:14] input [4:0] io_req_bits_uop_ppred, // @[functional-unit.scala:168:14] input io_req_bits_uop_prs1_busy, // @[functional-unit.scala:168:14] input io_req_bits_uop_prs2_busy, // @[functional-unit.scala:168:14] input io_req_bits_uop_prs3_busy, // @[functional-unit.scala:168:14] input io_req_bits_uop_ppred_busy, // @[functional-unit.scala:168:14] input [6:0] io_req_bits_uop_stale_pdst, // @[functional-unit.scala:168:14] input io_req_bits_uop_exception, // @[functional-unit.scala:168:14] input [63:0] 
io_req_bits_uop_exc_cause, // @[functional-unit.scala:168:14] input io_req_bits_uop_bypassable, // @[functional-unit.scala:168:14] input [4:0] io_req_bits_uop_mem_cmd, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_mem_size, // @[functional-unit.scala:168:14] input io_req_bits_uop_mem_signed, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_fence, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_fencei, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_amo, // @[functional-unit.scala:168:14] input io_req_bits_uop_uses_ldq, // @[functional-unit.scala:168:14] input io_req_bits_uop_uses_stq, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_sys_pc2epc, // @[functional-unit.scala:168:14] input io_req_bits_uop_is_unique, // @[functional-unit.scala:168:14] input io_req_bits_uop_flush_on_commit, // @[functional-unit.scala:168:14] input io_req_bits_uop_ldst_is_rs1, // @[functional-unit.scala:168:14] input [5:0] io_req_bits_uop_ldst, // @[functional-unit.scala:168:14] input [5:0] io_req_bits_uop_lrs1, // @[functional-unit.scala:168:14] input [5:0] io_req_bits_uop_lrs2, // @[functional-unit.scala:168:14] input [5:0] io_req_bits_uop_lrs3, // @[functional-unit.scala:168:14] input io_req_bits_uop_ldst_val, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_dst_rtype, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_lrs1_rtype, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_lrs2_rtype, // @[functional-unit.scala:168:14] input io_req_bits_uop_frs3_en, // @[functional-unit.scala:168:14] input io_req_bits_uop_fp_val, // @[functional-unit.scala:168:14] input io_req_bits_uop_fp_single, // @[functional-unit.scala:168:14] input io_req_bits_uop_xcpt_pf_if, // @[functional-unit.scala:168:14] input io_req_bits_uop_xcpt_ae_if, // @[functional-unit.scala:168:14] input io_req_bits_uop_xcpt_ma_if, // @[functional-unit.scala:168:14] input io_req_bits_uop_bp_debug_if, // @[functional-unit.scala:168:14] input io_req_bits_uop_bp_xcpt_if, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_debug_fsrc, // @[functional-unit.scala:168:14] input [1:0] io_req_bits_uop_debug_tsrc, // @[functional-unit.scala:168:14] input [63:0] io_req_bits_rs1_data, // @[functional-unit.scala:168:14] input [63:0] io_req_bits_rs2_data, // @[functional-unit.scala:168:14] input io_req_bits_kill, // @[functional-unit.scala:168:14] output io_resp_valid, // @[functional-unit.scala:168:14] output [6:0] io_resp_bits_uop_uopc, // @[functional-unit.scala:168:14] output [31:0] io_resp_bits_uop_inst, // @[functional-unit.scala:168:14] output [31:0] io_resp_bits_uop_debug_inst, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_rvc, // @[functional-unit.scala:168:14] output [39:0] io_resp_bits_uop_debug_pc, // @[functional-unit.scala:168:14] output [2:0] io_resp_bits_uop_iq_type, // @[functional-unit.scala:168:14] output [9:0] io_resp_bits_uop_fu_code, // @[functional-unit.scala:168:14] output [3:0] io_resp_bits_uop_ctrl_br_type, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_ctrl_op1_sel, // @[functional-unit.scala:168:14] output [2:0] io_resp_bits_uop_ctrl_op2_sel, // @[functional-unit.scala:168:14] output [2:0] io_resp_bits_uop_ctrl_imm_sel, // @[functional-unit.scala:168:14] output [4:0] io_resp_bits_uop_ctrl_op_fcn, // @[functional-unit.scala:168:14] output io_resp_bits_uop_ctrl_fcn_dw, // @[functional-unit.scala:168:14] output [2:0] io_resp_bits_uop_ctrl_csr_cmd, // @[functional-unit.scala:168:14] output 
io_resp_bits_uop_ctrl_is_load, // @[functional-unit.scala:168:14] output io_resp_bits_uop_ctrl_is_sta, // @[functional-unit.scala:168:14] output io_resp_bits_uop_ctrl_is_std, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_iw_state, // @[functional-unit.scala:168:14] output io_resp_bits_uop_iw_p1_poisoned, // @[functional-unit.scala:168:14] output io_resp_bits_uop_iw_p2_poisoned, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_br, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_jalr, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_jal, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_sfb, // @[functional-unit.scala:168:14] output [15:0] io_resp_bits_uop_br_mask, // @[functional-unit.scala:168:14] output [3:0] io_resp_bits_uop_br_tag, // @[functional-unit.scala:168:14] output [4:0] io_resp_bits_uop_ftq_idx, // @[functional-unit.scala:168:14] output io_resp_bits_uop_edge_inst, // @[functional-unit.scala:168:14] output [5:0] io_resp_bits_uop_pc_lob, // @[functional-unit.scala:168:14] output io_resp_bits_uop_taken, // @[functional-unit.scala:168:14] output [19:0] io_resp_bits_uop_imm_packed, // @[functional-unit.scala:168:14] output [11:0] io_resp_bits_uop_csr_addr, // @[functional-unit.scala:168:14] output [6:0] io_resp_bits_uop_rob_idx, // @[functional-unit.scala:168:14] output [4:0] io_resp_bits_uop_ldq_idx, // @[functional-unit.scala:168:14] output [4:0] io_resp_bits_uop_stq_idx, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_rxq_idx, // @[functional-unit.scala:168:14] output [6:0] io_resp_bits_uop_pdst, // @[functional-unit.scala:168:14] output [6:0] io_resp_bits_uop_prs1, // @[functional-unit.scala:168:14] output [6:0] io_resp_bits_uop_prs2, // @[functional-unit.scala:168:14] output [6:0] io_resp_bits_uop_prs3, // @[functional-unit.scala:168:14] output [4:0] io_resp_bits_uop_ppred, // @[functional-unit.scala:168:14] output io_resp_bits_uop_prs1_busy, // @[functional-unit.scala:168:14] output io_resp_bits_uop_prs2_busy, // @[functional-unit.scala:168:14] output io_resp_bits_uop_prs3_busy, // @[functional-unit.scala:168:14] output io_resp_bits_uop_ppred_busy, // @[functional-unit.scala:168:14] output [6:0] io_resp_bits_uop_stale_pdst, // @[functional-unit.scala:168:14] output io_resp_bits_uop_exception, // @[functional-unit.scala:168:14] output [63:0] io_resp_bits_uop_exc_cause, // @[functional-unit.scala:168:14] output io_resp_bits_uop_bypassable, // @[functional-unit.scala:168:14] output [4:0] io_resp_bits_uop_mem_cmd, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_mem_size, // @[functional-unit.scala:168:14] output io_resp_bits_uop_mem_signed, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_fence, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_fencei, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_amo, // @[functional-unit.scala:168:14] output io_resp_bits_uop_uses_ldq, // @[functional-unit.scala:168:14] output io_resp_bits_uop_uses_stq, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_sys_pc2epc, // @[functional-unit.scala:168:14] output io_resp_bits_uop_is_unique, // @[functional-unit.scala:168:14] output io_resp_bits_uop_flush_on_commit, // @[functional-unit.scala:168:14] output io_resp_bits_uop_ldst_is_rs1, // @[functional-unit.scala:168:14] output [5:0] io_resp_bits_uop_ldst, // @[functional-unit.scala:168:14] output [5:0] io_resp_bits_uop_lrs1, // @[functional-unit.scala:168:14] output [5:0] io_resp_bits_uop_lrs2, // 
@[functional-unit.scala:168:14] output [5:0] io_resp_bits_uop_lrs3, // @[functional-unit.scala:168:14] output io_resp_bits_uop_ldst_val, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_dst_rtype, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_lrs1_rtype, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_lrs2_rtype, // @[functional-unit.scala:168:14] output io_resp_bits_uop_frs3_en, // @[functional-unit.scala:168:14] output io_resp_bits_uop_fp_val, // @[functional-unit.scala:168:14] output io_resp_bits_uop_fp_single, // @[functional-unit.scala:168:14] output io_resp_bits_uop_xcpt_pf_if, // @[functional-unit.scala:168:14] output io_resp_bits_uop_xcpt_ae_if, // @[functional-unit.scala:168:14] output io_resp_bits_uop_xcpt_ma_if, // @[functional-unit.scala:168:14] output io_resp_bits_uop_bp_debug_if, // @[functional-unit.scala:168:14] output io_resp_bits_uop_bp_xcpt_if, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_debug_fsrc, // @[functional-unit.scala:168:14] output [1:0] io_resp_bits_uop_debug_tsrc, // @[functional-unit.scala:168:14] output [63:0] io_resp_bits_data, // @[functional-unit.scala:168:14] input [15:0] io_brupdate_b1_resolve_mask, // @[functional-unit.scala:168:14] input [15:0] io_brupdate_b1_mispredict_mask, // @[functional-unit.scala:168:14] input [6:0] io_brupdate_b2_uop_uopc, // @[functional-unit.scala:168:14] input [31:0] io_brupdate_b2_uop_inst, // @[functional-unit.scala:168:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_rvc, // @[functional-unit.scala:168:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[functional-unit.scala:168:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[functional-unit.scala:168:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[functional-unit.scala:168:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[functional-unit.scala:168:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[functional-unit.scala:168:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[functional-unit.scala:168:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[functional-unit.scala:168:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_ctrl_is_load, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_ctrl_is_std, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_br, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_jalr, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_jal, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_sfb, // @[functional-unit.scala:168:14] input [15:0] io_brupdate_b2_uop_br_mask, // @[functional-unit.scala:168:14] input [3:0] io_brupdate_b2_uop_br_tag, // @[functional-unit.scala:168:14] input [4:0] io_brupdate_b2_uop_ftq_idx, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_edge_inst, // @[functional-unit.scala:168:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_taken, // 
@[functional-unit.scala:168:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[functional-unit.scala:168:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[functional-unit.scala:168:14] input [6:0] io_brupdate_b2_uop_rob_idx, // @[functional-unit.scala:168:14] input [4:0] io_brupdate_b2_uop_ldq_idx, // @[functional-unit.scala:168:14] input [4:0] io_brupdate_b2_uop_stq_idx, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[functional-unit.scala:168:14] input [6:0] io_brupdate_b2_uop_pdst, // @[functional-unit.scala:168:14] input [6:0] io_brupdate_b2_uop_prs1, // @[functional-unit.scala:168:14] input [6:0] io_brupdate_b2_uop_prs2, // @[functional-unit.scala:168:14] input [6:0] io_brupdate_b2_uop_prs3, // @[functional-unit.scala:168:14] input [4:0] io_brupdate_b2_uop_ppred, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_prs1_busy, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_prs2_busy, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_prs3_busy, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_ppred_busy, // @[functional-unit.scala:168:14] input [6:0] io_brupdate_b2_uop_stale_pdst, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_exception, // @[functional-unit.scala:168:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_bypassable, // @[functional-unit.scala:168:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_mem_signed, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_fence, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_fencei, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_amo, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_uses_ldq, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_uses_stq, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_is_unique, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_flush_on_commit, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[functional-unit.scala:168:14] input [5:0] io_brupdate_b2_uop_ldst, // @[functional-unit.scala:168:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[functional-unit.scala:168:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[functional-unit.scala:168:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_ldst_val, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_frs3_en, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_fp_val, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_fp_single, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_bp_debug_if, // @[functional-unit.scala:168:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[functional-unit.scala:168:14] input [1:0] 
io_brupdate_b2_uop_debug_tsrc, // @[functional-unit.scala:168:14] input io_brupdate_b2_valid, // @[functional-unit.scala:168:14] input io_brupdate_b2_mispredict, // @[functional-unit.scala:168:14] input io_brupdate_b2_taken, // @[functional-unit.scala:168:14] input [2:0] io_brupdate_b2_cfi_type, // @[functional-unit.scala:168:14] input [1:0] io_brupdate_b2_pc_sel, // @[functional-unit.scala:168:14] input [39:0] io_brupdate_b2_jalr_target, // @[functional-unit.scala:168:14] input [20:0] io_brupdate_b2_target_offset, // @[functional-unit.scala:168:14] output io_bypass_0_valid, // @[functional-unit.scala:168:14] output [6:0] io_bypass_0_bits_uop_uopc, // @[functional-unit.scala:168:14] output [31:0] io_bypass_0_bits_uop_inst, // @[functional-unit.scala:168:14] output [31:0] io_bypass_0_bits_uop_debug_inst, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_rvc, // @[functional-unit.scala:168:14] output [39:0] io_bypass_0_bits_uop_debug_pc, // @[functional-unit.scala:168:14] output [2:0] io_bypass_0_bits_uop_iq_type, // @[functional-unit.scala:168:14] output [9:0] io_bypass_0_bits_uop_fu_code, // @[functional-unit.scala:168:14] output [3:0] io_bypass_0_bits_uop_ctrl_br_type, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_ctrl_op1_sel, // @[functional-unit.scala:168:14] output [2:0] io_bypass_0_bits_uop_ctrl_op2_sel, // @[functional-unit.scala:168:14] output [2:0] io_bypass_0_bits_uop_ctrl_imm_sel, // @[functional-unit.scala:168:14] output [4:0] io_bypass_0_bits_uop_ctrl_op_fcn, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_ctrl_fcn_dw, // @[functional-unit.scala:168:14] output [2:0] io_bypass_0_bits_uop_ctrl_csr_cmd, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_ctrl_is_load, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_ctrl_is_sta, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_ctrl_is_std, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_iw_state, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_iw_p1_poisoned, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_iw_p2_poisoned, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_br, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_jalr, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_jal, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_sfb, // @[functional-unit.scala:168:14] output [15:0] io_bypass_0_bits_uop_br_mask, // @[functional-unit.scala:168:14] output [3:0] io_bypass_0_bits_uop_br_tag, // @[functional-unit.scala:168:14] output [4:0] io_bypass_0_bits_uop_ftq_idx, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_edge_inst, // @[functional-unit.scala:168:14] output [5:0] io_bypass_0_bits_uop_pc_lob, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_taken, // @[functional-unit.scala:168:14] output [19:0] io_bypass_0_bits_uop_imm_packed, // @[functional-unit.scala:168:14] output [11:0] io_bypass_0_bits_uop_csr_addr, // @[functional-unit.scala:168:14] output [6:0] io_bypass_0_bits_uop_rob_idx, // @[functional-unit.scala:168:14] output [4:0] io_bypass_0_bits_uop_ldq_idx, // @[functional-unit.scala:168:14] output [4:0] io_bypass_0_bits_uop_stq_idx, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_rxq_idx, // @[functional-unit.scala:168:14] output [6:0] io_bypass_0_bits_uop_pdst, // @[functional-unit.scala:168:14] output [6:0] io_bypass_0_bits_uop_prs1, // 
@[functional-unit.scala:168:14] output [6:0] io_bypass_0_bits_uop_prs2, // @[functional-unit.scala:168:14] output [6:0] io_bypass_0_bits_uop_prs3, // @[functional-unit.scala:168:14] output [4:0] io_bypass_0_bits_uop_ppred, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_prs1_busy, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_prs2_busy, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_prs3_busy, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_ppred_busy, // @[functional-unit.scala:168:14] output [6:0] io_bypass_0_bits_uop_stale_pdst, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_exception, // @[functional-unit.scala:168:14] output [63:0] io_bypass_0_bits_uop_exc_cause, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_bypassable, // @[functional-unit.scala:168:14] output [4:0] io_bypass_0_bits_uop_mem_cmd, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_mem_size, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_mem_signed, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_fence, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_fencei, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_amo, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_uses_ldq, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_uses_stq, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_sys_pc2epc, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_is_unique, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_flush_on_commit, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_ldst_is_rs1, // @[functional-unit.scala:168:14] output [5:0] io_bypass_0_bits_uop_ldst, // @[functional-unit.scala:168:14] output [5:0] io_bypass_0_bits_uop_lrs1, // @[functional-unit.scala:168:14] output [5:0] io_bypass_0_bits_uop_lrs2, // @[functional-unit.scala:168:14] output [5:0] io_bypass_0_bits_uop_lrs3, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_ldst_val, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_dst_rtype, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_lrs1_rtype, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_lrs2_rtype, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_frs3_en, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_fp_val, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_fp_single, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_xcpt_pf_if, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_xcpt_ae_if, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_xcpt_ma_if, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_bp_debug_if, // @[functional-unit.scala:168:14] output io_bypass_0_bits_uop_bp_xcpt_if, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_debug_fsrc, // @[functional-unit.scala:168:14] output [1:0] io_bypass_0_bits_uop_debug_tsrc, // @[functional-unit.scala:168:14] output [63:0] io_bypass_0_bits_data, // @[functional-unit.scala:168:14] output [6:0] io_brinfo_uop_uopc, // @[functional-unit.scala:168:14] output [31:0] io_brinfo_uop_inst, // @[functional-unit.scala:168:14] output [31:0] io_brinfo_uop_debug_inst, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_rvc, // @[functional-unit.scala:168:14] output [39:0] io_brinfo_uop_debug_pc, // 
@[functional-unit.scala:168:14] output [2:0] io_brinfo_uop_iq_type, // @[functional-unit.scala:168:14] output [9:0] io_brinfo_uop_fu_code, // @[functional-unit.scala:168:14] output [3:0] io_brinfo_uop_ctrl_br_type, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_ctrl_op1_sel, // @[functional-unit.scala:168:14] output [2:0] io_brinfo_uop_ctrl_op2_sel, // @[functional-unit.scala:168:14] output [2:0] io_brinfo_uop_ctrl_imm_sel, // @[functional-unit.scala:168:14] output [4:0] io_brinfo_uop_ctrl_op_fcn, // @[functional-unit.scala:168:14] output io_brinfo_uop_ctrl_fcn_dw, // @[functional-unit.scala:168:14] output [2:0] io_brinfo_uop_ctrl_csr_cmd, // @[functional-unit.scala:168:14] output io_brinfo_uop_ctrl_is_load, // @[functional-unit.scala:168:14] output io_brinfo_uop_ctrl_is_sta, // @[functional-unit.scala:168:14] output io_brinfo_uop_ctrl_is_std, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_iw_state, // @[functional-unit.scala:168:14] output io_brinfo_uop_iw_p1_poisoned, // @[functional-unit.scala:168:14] output io_brinfo_uop_iw_p2_poisoned, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_br, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_jalr, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_jal, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_sfb, // @[functional-unit.scala:168:14] output [15:0] io_brinfo_uop_br_mask, // @[functional-unit.scala:168:14] output [3:0] io_brinfo_uop_br_tag, // @[functional-unit.scala:168:14] output [4:0] io_brinfo_uop_ftq_idx, // @[functional-unit.scala:168:14] output io_brinfo_uop_edge_inst, // @[functional-unit.scala:168:14] output [5:0] io_brinfo_uop_pc_lob, // @[functional-unit.scala:168:14] output io_brinfo_uop_taken, // @[functional-unit.scala:168:14] output [19:0] io_brinfo_uop_imm_packed, // @[functional-unit.scala:168:14] output [11:0] io_brinfo_uop_csr_addr, // @[functional-unit.scala:168:14] output [6:0] io_brinfo_uop_rob_idx, // @[functional-unit.scala:168:14] output [4:0] io_brinfo_uop_ldq_idx, // @[functional-unit.scala:168:14] output [4:0] io_brinfo_uop_stq_idx, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_rxq_idx, // @[functional-unit.scala:168:14] output [6:0] io_brinfo_uop_pdst, // @[functional-unit.scala:168:14] output [6:0] io_brinfo_uop_prs1, // @[functional-unit.scala:168:14] output [6:0] io_brinfo_uop_prs2, // @[functional-unit.scala:168:14] output [6:0] io_brinfo_uop_prs3, // @[functional-unit.scala:168:14] output [4:0] io_brinfo_uop_ppred, // @[functional-unit.scala:168:14] output io_brinfo_uop_prs1_busy, // @[functional-unit.scala:168:14] output io_brinfo_uop_prs2_busy, // @[functional-unit.scala:168:14] output io_brinfo_uop_prs3_busy, // @[functional-unit.scala:168:14] output io_brinfo_uop_ppred_busy, // @[functional-unit.scala:168:14] output [6:0] io_brinfo_uop_stale_pdst, // @[functional-unit.scala:168:14] output io_brinfo_uop_exception, // @[functional-unit.scala:168:14] output [63:0] io_brinfo_uop_exc_cause, // @[functional-unit.scala:168:14] output io_brinfo_uop_bypassable, // @[functional-unit.scala:168:14] output [4:0] io_brinfo_uop_mem_cmd, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_mem_size, // @[functional-unit.scala:168:14] output io_brinfo_uop_mem_signed, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_fence, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_fencei, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_amo, // @[functional-unit.scala:168:14] output io_brinfo_uop_uses_ldq, 
// @[functional-unit.scala:168:14] output io_brinfo_uop_uses_stq, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_sys_pc2epc, // @[functional-unit.scala:168:14] output io_brinfo_uop_is_unique, // @[functional-unit.scala:168:14] output io_brinfo_uop_flush_on_commit, // @[functional-unit.scala:168:14] output io_brinfo_uop_ldst_is_rs1, // @[functional-unit.scala:168:14] output [5:0] io_brinfo_uop_ldst, // @[functional-unit.scala:168:14] output [5:0] io_brinfo_uop_lrs1, // @[functional-unit.scala:168:14] output [5:0] io_brinfo_uop_lrs2, // @[functional-unit.scala:168:14] output [5:0] io_brinfo_uop_lrs3, // @[functional-unit.scala:168:14] output io_brinfo_uop_ldst_val, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_dst_rtype, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_lrs1_rtype, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_lrs2_rtype, // @[functional-unit.scala:168:14] output io_brinfo_uop_frs3_en, // @[functional-unit.scala:168:14] output io_brinfo_uop_fp_val, // @[functional-unit.scala:168:14] output io_brinfo_uop_fp_single, // @[functional-unit.scala:168:14] output io_brinfo_uop_xcpt_pf_if, // @[functional-unit.scala:168:14] output io_brinfo_uop_xcpt_ae_if, // @[functional-unit.scala:168:14] output io_brinfo_uop_xcpt_ma_if, // @[functional-unit.scala:168:14] output io_brinfo_uop_bp_debug_if, // @[functional-unit.scala:168:14] output io_brinfo_uop_bp_xcpt_if, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_debug_fsrc, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_uop_debug_tsrc, // @[functional-unit.scala:168:14] output io_brinfo_valid, // @[functional-unit.scala:168:14] output io_brinfo_mispredict, // @[functional-unit.scala:168:14] output io_brinfo_taken, // @[functional-unit.scala:168:14] output [2:0] io_brinfo_cfi_type, // @[functional-unit.scala:168:14] output [1:0] io_brinfo_pc_sel, // @[functional-unit.scala:168:14] output [39:0] io_brinfo_jalr_target, // @[functional-unit.scala:168:14] output [20:0] io_brinfo_target_offset, // @[functional-unit.scala:168:14] input io_get_ftq_pc_entry_cfi_idx_valid, // @[functional-unit.scala:168:14] input [2:0] io_get_ftq_pc_entry_cfi_idx_bits, // @[functional-unit.scala:168:14] input io_get_ftq_pc_entry_cfi_taken, // @[functional-unit.scala:168:14] input io_get_ftq_pc_entry_cfi_mispredicted, // @[functional-unit.scala:168:14] input [2:0] io_get_ftq_pc_entry_cfi_type, // @[functional-unit.scala:168:14] input [7:0] io_get_ftq_pc_entry_br_mask, // @[functional-unit.scala:168:14] input io_get_ftq_pc_entry_cfi_is_call, // @[functional-unit.scala:168:14] input io_get_ftq_pc_entry_cfi_is_ret, // @[functional-unit.scala:168:14] input io_get_ftq_pc_entry_cfi_npc_plus4, // @[functional-unit.scala:168:14] input [39:0] io_get_ftq_pc_entry_ras_top, // @[functional-unit.scala:168:14] input [4:0] io_get_ftq_pc_entry_ras_idx, // @[functional-unit.scala:168:14] input io_get_ftq_pc_entry_start_bank, // @[functional-unit.scala:168:14] input [39:0] io_get_ftq_pc_pc, // @[functional-unit.scala:168:14] input io_get_ftq_pc_next_val, // @[functional-unit.scala:168:14] input [39:0] io_get_ftq_pc_next_pc // @[functional-unit.scala:168:14] ); wire [63:0] _alu_io_out; // @[functional-unit.scala:327:19] wire io_req_valid_0 = io_req_valid; // @[functional-unit.scala:290:7] wire [6:0] io_req_bits_uop_uopc_0 = io_req_bits_uop_uopc; // @[functional-unit.scala:290:7] wire [31:0] io_req_bits_uop_inst_0 = io_req_bits_uop_inst; // @[functional-unit.scala:290:7] wire [31:0] 
io_req_bits_uop_debug_inst_0 = io_req_bits_uop_debug_inst; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_rvc_0 = io_req_bits_uop_is_rvc; // @[functional-unit.scala:290:7] wire [39:0] io_req_bits_uop_debug_pc_0 = io_req_bits_uop_debug_pc; // @[functional-unit.scala:290:7] wire [2:0] io_req_bits_uop_iq_type_0 = io_req_bits_uop_iq_type; // @[functional-unit.scala:290:7] wire [9:0] io_req_bits_uop_fu_code_0 = io_req_bits_uop_fu_code; // @[functional-unit.scala:290:7] wire [3:0] io_req_bits_uop_ctrl_br_type_0 = io_req_bits_uop_ctrl_br_type; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_ctrl_op1_sel_0 = io_req_bits_uop_ctrl_op1_sel; // @[functional-unit.scala:290:7] wire [2:0] io_req_bits_uop_ctrl_op2_sel_0 = io_req_bits_uop_ctrl_op2_sel; // @[functional-unit.scala:290:7] wire [2:0] io_req_bits_uop_ctrl_imm_sel_0 = io_req_bits_uop_ctrl_imm_sel; // @[functional-unit.scala:290:7] wire [4:0] io_req_bits_uop_ctrl_op_fcn_0 = io_req_bits_uop_ctrl_op_fcn; // @[functional-unit.scala:290:7] wire io_req_bits_uop_ctrl_fcn_dw_0 = io_req_bits_uop_ctrl_fcn_dw; // @[functional-unit.scala:290:7] wire [2:0] io_req_bits_uop_ctrl_csr_cmd_0 = io_req_bits_uop_ctrl_csr_cmd; // @[functional-unit.scala:290:7] wire io_req_bits_uop_ctrl_is_load_0 = io_req_bits_uop_ctrl_is_load; // @[functional-unit.scala:290:7] wire io_req_bits_uop_ctrl_is_sta_0 = io_req_bits_uop_ctrl_is_sta; // @[functional-unit.scala:290:7] wire io_req_bits_uop_ctrl_is_std_0 = io_req_bits_uop_ctrl_is_std; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_iw_state_0 = io_req_bits_uop_iw_state; // @[functional-unit.scala:290:7] wire io_req_bits_uop_iw_p1_poisoned_0 = io_req_bits_uop_iw_p1_poisoned; // @[functional-unit.scala:290:7] wire io_req_bits_uop_iw_p2_poisoned_0 = io_req_bits_uop_iw_p2_poisoned; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_br_0 = io_req_bits_uop_is_br; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_jalr_0 = io_req_bits_uop_is_jalr; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_jal_0 = io_req_bits_uop_is_jal; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_sfb_0 = io_req_bits_uop_is_sfb; // @[functional-unit.scala:290:7] wire [15:0] io_req_bits_uop_br_mask_0 = io_req_bits_uop_br_mask; // @[functional-unit.scala:290:7] wire [3:0] io_req_bits_uop_br_tag_0 = io_req_bits_uop_br_tag; // @[functional-unit.scala:290:7] wire [4:0] io_req_bits_uop_ftq_idx_0 = io_req_bits_uop_ftq_idx; // @[functional-unit.scala:290:7] wire io_req_bits_uop_edge_inst_0 = io_req_bits_uop_edge_inst; // @[functional-unit.scala:290:7] wire [5:0] io_req_bits_uop_pc_lob_0 = io_req_bits_uop_pc_lob; // @[functional-unit.scala:290:7] wire io_req_bits_uop_taken_0 = io_req_bits_uop_taken; // @[functional-unit.scala:290:7] wire [19:0] io_req_bits_uop_imm_packed_0 = io_req_bits_uop_imm_packed; // @[functional-unit.scala:290:7] wire [11:0] io_req_bits_uop_csr_addr_0 = io_req_bits_uop_csr_addr; // @[functional-unit.scala:290:7] wire [6:0] io_req_bits_uop_rob_idx_0 = io_req_bits_uop_rob_idx; // @[functional-unit.scala:290:7] wire [4:0] io_req_bits_uop_ldq_idx_0 = io_req_bits_uop_ldq_idx; // @[functional-unit.scala:290:7] wire [4:0] io_req_bits_uop_stq_idx_0 = io_req_bits_uop_stq_idx; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_rxq_idx_0 = io_req_bits_uop_rxq_idx; // @[functional-unit.scala:290:7] wire [6:0] io_req_bits_uop_pdst_0 = io_req_bits_uop_pdst; // @[functional-unit.scala:290:7] wire [6:0] io_req_bits_uop_prs1_0 = io_req_bits_uop_prs1; // @[functional-unit.scala:290:7] 
wire [6:0] io_req_bits_uop_prs2_0 = io_req_bits_uop_prs2; // @[functional-unit.scala:290:7] wire [6:0] io_req_bits_uop_prs3_0 = io_req_bits_uop_prs3; // @[functional-unit.scala:290:7] wire [4:0] io_req_bits_uop_ppred_0 = io_req_bits_uop_ppred; // @[functional-unit.scala:290:7] wire io_req_bits_uop_prs1_busy_0 = io_req_bits_uop_prs1_busy; // @[functional-unit.scala:290:7] wire io_req_bits_uop_prs2_busy_0 = io_req_bits_uop_prs2_busy; // @[functional-unit.scala:290:7] wire io_req_bits_uop_prs3_busy_0 = io_req_bits_uop_prs3_busy; // @[functional-unit.scala:290:7] wire io_req_bits_uop_ppred_busy_0 = io_req_bits_uop_ppred_busy; // @[functional-unit.scala:290:7] wire [6:0] io_req_bits_uop_stale_pdst_0 = io_req_bits_uop_stale_pdst; // @[functional-unit.scala:290:7] wire io_req_bits_uop_exception_0 = io_req_bits_uop_exception; // @[functional-unit.scala:290:7] wire [63:0] io_req_bits_uop_exc_cause_0 = io_req_bits_uop_exc_cause; // @[functional-unit.scala:290:7] wire io_req_bits_uop_bypassable_0 = io_req_bits_uop_bypassable; // @[functional-unit.scala:290:7] wire [4:0] io_req_bits_uop_mem_cmd_0 = io_req_bits_uop_mem_cmd; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_mem_size_0 = io_req_bits_uop_mem_size; // @[functional-unit.scala:290:7] wire io_req_bits_uop_mem_signed_0 = io_req_bits_uop_mem_signed; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_fence_0 = io_req_bits_uop_is_fence; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_fencei_0 = io_req_bits_uop_is_fencei; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_amo_0 = io_req_bits_uop_is_amo; // @[functional-unit.scala:290:7] wire io_req_bits_uop_uses_ldq_0 = io_req_bits_uop_uses_ldq; // @[functional-unit.scala:290:7] wire io_req_bits_uop_uses_stq_0 = io_req_bits_uop_uses_stq; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_sys_pc2epc_0 = io_req_bits_uop_is_sys_pc2epc; // @[functional-unit.scala:290:7] wire io_req_bits_uop_is_unique_0 = io_req_bits_uop_is_unique; // @[functional-unit.scala:290:7] wire io_req_bits_uop_flush_on_commit_0 = io_req_bits_uop_flush_on_commit; // @[functional-unit.scala:290:7] wire io_req_bits_uop_ldst_is_rs1_0 = io_req_bits_uop_ldst_is_rs1; // @[functional-unit.scala:290:7] wire [5:0] io_req_bits_uop_ldst_0 = io_req_bits_uop_ldst; // @[functional-unit.scala:290:7] wire [5:0] io_req_bits_uop_lrs1_0 = io_req_bits_uop_lrs1; // @[functional-unit.scala:290:7] wire [5:0] io_req_bits_uop_lrs2_0 = io_req_bits_uop_lrs2; // @[functional-unit.scala:290:7] wire [5:0] io_req_bits_uop_lrs3_0 = io_req_bits_uop_lrs3; // @[functional-unit.scala:290:7] wire io_req_bits_uop_ldst_val_0 = io_req_bits_uop_ldst_val; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_dst_rtype_0 = io_req_bits_uop_dst_rtype; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_lrs1_rtype_0 = io_req_bits_uop_lrs1_rtype; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_lrs2_rtype_0 = io_req_bits_uop_lrs2_rtype; // @[functional-unit.scala:290:7] wire io_req_bits_uop_frs3_en_0 = io_req_bits_uop_frs3_en; // @[functional-unit.scala:290:7] wire io_req_bits_uop_fp_val_0 = io_req_bits_uop_fp_val; // @[functional-unit.scala:290:7] wire io_req_bits_uop_fp_single_0 = io_req_bits_uop_fp_single; // @[functional-unit.scala:290:7] wire io_req_bits_uop_xcpt_pf_if_0 = io_req_bits_uop_xcpt_pf_if; // @[functional-unit.scala:290:7] wire io_req_bits_uop_xcpt_ae_if_0 = io_req_bits_uop_xcpt_ae_if; // @[functional-unit.scala:290:7] wire io_req_bits_uop_xcpt_ma_if_0 = io_req_bits_uop_xcpt_ma_if; // 
@[functional-unit.scala:290:7] wire io_req_bits_uop_bp_debug_if_0 = io_req_bits_uop_bp_debug_if; // @[functional-unit.scala:290:7] wire io_req_bits_uop_bp_xcpt_if_0 = io_req_bits_uop_bp_xcpt_if; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_debug_fsrc_0 = io_req_bits_uop_debug_fsrc; // @[functional-unit.scala:290:7] wire [1:0] io_req_bits_uop_debug_tsrc_0 = io_req_bits_uop_debug_tsrc; // @[functional-unit.scala:290:7] wire [63:0] io_req_bits_rs1_data_0 = io_req_bits_rs1_data; // @[functional-unit.scala:290:7] wire [63:0] io_req_bits_rs2_data_0 = io_req_bits_rs2_data; // @[functional-unit.scala:290:7] wire io_req_bits_kill_0 = io_req_bits_kill; // @[functional-unit.scala:290:7] wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[functional-unit.scala:290:7] wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[functional-unit.scala:290:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[functional-unit.scala:290:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[functional-unit.scala:290:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[functional-unit.scala:290:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[functional-unit.scala:290:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[functional-unit.scala:290:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[functional-unit.scala:290:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[functional-unit.scala:290:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[functional-unit.scala:290:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[functional-unit.scala:290:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[functional-unit.scala:290:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[functional-unit.scala:290:7] wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // 
@[functional-unit.scala:290:7] wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[functional-unit.scala:290:7] wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[functional-unit.scala:290:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[functional-unit.scala:290:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[functional-unit.scala:290:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[functional-unit.scala:290:7] wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[functional-unit.scala:290:7] wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[functional-unit.scala:290:7] wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[functional-unit.scala:290:7] wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[functional-unit.scala:290:7] wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[functional-unit.scala:290:7] wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[functional-unit.scala:290:7] wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[functional-unit.scala:290:7] wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[functional-unit.scala:290:7] wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[functional-unit.scala:290:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[functional-unit.scala:290:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[functional-unit.scala:290:7] wire 
io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[functional-unit.scala:290:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[functional-unit.scala:290:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[functional-unit.scala:290:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[functional-unit.scala:290:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[functional-unit.scala:290:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[functional-unit.scala:290:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[functional-unit.scala:290:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[functional-unit.scala:290:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[functional-unit.scala:290:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[functional-unit.scala:290:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[functional-unit.scala:290:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[functional-unit.scala:290:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_entry_cfi_idx_valid_0 = io_get_ftq_pc_entry_cfi_idx_valid; // @[functional-unit.scala:290:7] wire [2:0] io_get_ftq_pc_entry_cfi_idx_bits_0 = io_get_ftq_pc_entry_cfi_idx_bits; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_entry_cfi_taken_0 = io_get_ftq_pc_entry_cfi_taken; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_entry_cfi_mispredicted_0 = io_get_ftq_pc_entry_cfi_mispredicted; // @[functional-unit.scala:290:7] wire [2:0] io_get_ftq_pc_entry_cfi_type_0 = io_get_ftq_pc_entry_cfi_type; // @[functional-unit.scala:290:7] wire [7:0] io_get_ftq_pc_entry_br_mask_0 = io_get_ftq_pc_entry_br_mask; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_entry_cfi_is_call_0 = io_get_ftq_pc_entry_cfi_is_call; // @[functional-unit.scala:290:7] wire 
io_get_ftq_pc_entry_cfi_is_ret_0 = io_get_ftq_pc_entry_cfi_is_ret; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_entry_cfi_npc_plus4_0 = io_get_ftq_pc_entry_cfi_npc_plus4; // @[functional-unit.scala:290:7] wire [39:0] io_get_ftq_pc_entry_ras_top_0 = io_get_ftq_pc_entry_ras_top; // @[functional-unit.scala:290:7] wire [4:0] io_get_ftq_pc_entry_ras_idx_0 = io_get_ftq_pc_entry_ras_idx; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_entry_start_bank_0 = io_get_ftq_pc_entry_start_bank; // @[functional-unit.scala:290:7] wire [39:0] io_get_ftq_pc_pc_0 = io_get_ftq_pc_pc; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_next_val_0 = io_get_ftq_pc_next_val; // @[functional-unit.scala:290:7] wire [39:0] io_get_ftq_pc_next_pc_0 = io_get_ftq_pc_next_pc; // @[functional-unit.scala:290:7] wire [3:0] _cfi_idx_T_1 = 4'h8; // @[functional-unit.scala:422:82] wire [38:0] io_resp_bits_sfence_bits_addr = 39'h0; // @[functional-unit.scala:290:7] wire [24:0] io_resp_bits_mxcpt_bits = 25'h0; // @[functional-unit.scala:290:7] wire [11:0] io_resp_bits_fflags_bits_uop_csr_addr = 12'h0; // @[functional-unit.scala:290:7] wire [11:0] io_bypass_0_bits_fflags_bits_uop_csr_addr = 12'h0; // @[functional-unit.scala:290:7] wire [19:0] io_resp_bits_fflags_bits_uop_imm_packed = 20'h0; // @[functional-unit.scala:290:7] wire [19:0] io_bypass_0_bits_fflags_bits_uop_imm_packed = 20'h0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_fflags_bits_uop_pc_lob = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_fflags_bits_uop_ldst = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_fflags_bits_uop_lrs1 = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_fflags_bits_uop_lrs2 = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_fflags_bits_uop_lrs3 = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_pc_lob = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_ldst = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_lrs1 = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_lrs2 = 6'h0; // @[functional-unit.scala:290:7] wire [5:0] io_bypass_0_bits_fflags_bits_uop_lrs3 = 6'h0; // @[functional-unit.scala:290:7] wire [15:0] io_resp_bits_fflags_bits_uop_br_mask = 16'h0; // @[functional-unit.scala:290:7] wire [15:0] io_bypass_0_bits_fflags_bits_uop_br_mask = 16'h0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_fflags_bits_uop_ftq_idx = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_fflags_bits_uop_ldq_idx = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_fflags_bits_uop_stq_idx = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_fflags_bits_uop_ppred = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_fflags_bits_flags = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_ctrl_op_fcn = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_ftq_idx = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_ldq_idx = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_stq_idx = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_ppred 
= 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_bypass_0_bits_fflags_bits_uop_mem_cmd = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_bypass_0_bits_fflags_bits_flags = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_get_ftq_pc_ftq_idx = 5'h0; // @[functional-unit.scala:290:7] wire [4:0] io_get_ftq_pc_ghist_ras_idx = 5'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_iw_state = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_mem_size = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_ctrl_op1_sel = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_iw_state = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_rxq_idx = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_mem_size = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_dst_rtype = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_lrs1_rtype = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_lrs2_rtype = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_debug_fsrc = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] io_bypass_0_bits_fflags_bits_uop_debug_tsrc = 2'h0; // @[functional-unit.scala:290:7] wire [1:0] _pc_sel_T_10 = 2'h0; // @[functional-unit.scala:349:53] wire [3:0] io_resp_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[functional-unit.scala:290:7] wire [3:0] io_resp_bits_fflags_bits_uop_br_tag = 4'h0; // @[functional-unit.scala:290:7] wire [3:0] io_bypass_0_bits_fflags_bits_uop_ctrl_br_type = 4'h0; // @[functional-unit.scala:290:7] wire [3:0] io_bypass_0_bits_fflags_bits_uop_br_tag = 4'h0; // @[functional-unit.scala:290:7] wire [9:0] io_resp_bits_fflags_bits_uop_fu_code = 10'h0; // @[functional-unit.scala:290:7] wire [9:0] io_bypass_0_bits_fflags_bits_uop_fu_code = 10'h0; // @[functional-unit.scala:290:7] wire [2:0] io_resp_bits_fflags_bits_uop_iq_type = 3'h0; // @[functional-unit.scala:290:7] wire [2:0] io_resp_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[functional-unit.scala:290:7] wire [2:0] io_resp_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[functional-unit.scala:290:7] wire [2:0] io_resp_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[functional-unit.scala:290:7] wire [2:0] io_bypass_0_bits_fflags_bits_uop_iq_type = 3'h0; // @[functional-unit.scala:290:7] wire [2:0] io_bypass_0_bits_fflags_bits_uop_ctrl_op2_sel = 3'h0; // @[functional-unit.scala:290:7] wire [2:0] io_bypass_0_bits_fflags_bits_uop_ctrl_imm_sel = 3'h0; // @[functional-unit.scala:290:7] wire [2:0] io_bypass_0_bits_fflags_bits_uop_ctrl_csr_cmd = 3'h0; // @[functional-unit.scala:290:7] wire [39:0] io_resp_bits_fflags_bits_uop_debug_pc = 40'h0; // 
@[functional-unit.scala:290:7] wire [39:0] io_resp_bits_addr = 40'h0; // @[functional-unit.scala:290:7] wire [39:0] io_bypass_0_bits_fflags_bits_uop_debug_pc = 40'h0; // @[functional-unit.scala:290:7] wire [39:0] io_get_ftq_pc_com_pc = 40'h0; // @[functional-unit.scala:290:7] wire [31:0] io_resp_bits_fflags_bits_uop_inst = 32'h0; // @[functional-unit.scala:290:7] wire [31:0] io_resp_bits_fflags_bits_uop_debug_inst = 32'h0; // @[functional-unit.scala:290:7] wire [31:0] io_bypass_0_bits_fflags_bits_uop_inst = 32'h0; // @[functional-unit.scala:290:7] wire [31:0] io_bypass_0_bits_fflags_bits_uop_debug_inst = 32'h0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_fflags_bits_uop_uopc = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_fflags_bits_uop_rob_idx = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_fflags_bits_uop_pdst = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_fflags_bits_uop_prs1 = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_fflags_bits_uop_prs2 = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_fflags_bits_uop_prs3 = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_fflags_bits_uop_stale_pdst = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_uopc = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_rob_idx = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_pdst = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_prs1 = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_prs2 = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_prs3 = 7'h0; // @[functional-unit.scala:290:7] wire [6:0] io_bypass_0_bits_fflags_bits_uop_stale_pdst = 7'h0; // @[functional-unit.scala:290:7] wire [63:0] io_req_bits_rs3_data = 64'h0; // @[functional-unit.scala:290:7] wire [63:0] io_resp_bits_fflags_bits_uop_exc_cause = 64'h0; // @[functional-unit.scala:290:7] wire [63:0] io_bypass_0_bits_fflags_bits_uop_exc_cause = 64'h0; // @[functional-unit.scala:290:7] wire [63:0] io_get_ftq_pc_ghist_old_history = 64'h0; // @[functional-unit.scala:290:7] wire io_req_ready = 1'h1; // @[functional-unit.scala:290:7] wire io_req_bits_pred_data = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_ready = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_predicated = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_valid = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_rvc = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_br = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_jalr = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_jal = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_sfb = 1'h0; // 
@[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_edge_inst = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_taken = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_exception = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_bypassable = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_mem_signed = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_fence = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_fencei = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_amo = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_uses_stq = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_is_unique = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_ldst_val = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_frs3_en = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_fp_val = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_fp_single = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_mxcpt_valid = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_sfence_valid = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_sfence_bits_rs1 = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_sfence_bits_rs2 = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_sfence_bits_asid = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_sfence_bits_hv = 1'h0; // @[functional-unit.scala:290:7] wire io_resp_bits_sfence_bits_hg = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_predicated = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_valid = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_rvc = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_ctrl_fcn_dw = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_ctrl_is_load = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_ctrl_is_sta = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_ctrl_is_std = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_iw_p1_poisoned = 1'h0; // 
@[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_iw_p2_poisoned = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_br = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_jalr = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_jal = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_sfb = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_edge_inst = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_taken = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_prs1_busy = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_prs2_busy = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_prs3_busy = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_ppred_busy = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_exception = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_bypassable = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_mem_signed = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_fence = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_fencei = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_amo = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_uses_ldq = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_uses_stq = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_sys_pc2epc = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_is_unique = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_flush_on_commit = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_ldst_is_rs1 = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_ldst_val = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_frs3_en = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_fp_val = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_fp_single = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_xcpt_pf_if = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_xcpt_ae_if = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_xcpt_ma_if = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_bp_debug_if = 1'h0; // @[functional-unit.scala:290:7] wire io_bypass_0_bits_fflags_bits_uop_bp_xcpt_if = 1'h0; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_ghist_current_saw_branch_not_taken = 1'h0; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_ghist_new_saw_branch_not_taken = 1'h0; // @[functional-unit.scala:290:7] wire io_get_ftq_pc_ghist_new_saw_branch_taken = 1'h0; // @[functional-unit.scala:290:7] wire _r_valids_WIRE_0 = 1'h0; // @[functional-unit.scala:236:35] wire _r_val_WIRE_0 = 1'h0; // @[functional-unit.scala:446:31] wire _alu_out_T_2 = 1'h0; // @[micro-op.scala:110:43] wire _alu_out_T_3 = 1'h0; // @[functional-unit.scala:449:51] wire _r_data_0_T_1 = 1'h0; // @[micro-op.scala:109:42] wire _r_pred_0_T_2 = 1'h0; // @[micro-op.scala:110:43] wire 
_r_pred_0_T_3 = 1'h0; // @[functional-unit.scala:454:46] wire _io_bypass_0_bits_data_T_1 = 1'h0; // @[micro-op.scala:109:42] wire io_bypass_0_valid_0 = io_req_valid_0; // @[functional-unit.scala:290:7] wire [6:0] io_bypass_0_bits_uop_uopc_0 = io_req_bits_uop_uopc_0; // @[functional-unit.scala:290:7] wire [6:0] brinfo_uop_uopc = io_req_bits_uop_uopc_0; // @[functional-unit.scala:290:7, :385:20] wire [31:0] io_bypass_0_bits_uop_inst_0 = io_req_bits_uop_inst_0; // @[functional-unit.scala:290:7] wire [31:0] brinfo_uop_inst = io_req_bits_uop_inst_0; // @[functional-unit.scala:290:7, :385:20] wire [31:0] io_bypass_0_bits_uop_debug_inst_0 = io_req_bits_uop_debug_inst_0; // @[functional-unit.scala:290:7] wire [31:0] brinfo_uop_debug_inst = io_req_bits_uop_debug_inst_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_rvc_0 = io_req_bits_uop_is_rvc_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_rvc = io_req_bits_uop_is_rvc_0; // @[functional-unit.scala:290:7, :385:20] wire [39:0] io_bypass_0_bits_uop_debug_pc_0 = io_req_bits_uop_debug_pc_0; // @[functional-unit.scala:290:7] wire [39:0] brinfo_uop_debug_pc = io_req_bits_uop_debug_pc_0; // @[functional-unit.scala:290:7, :385:20] wire [2:0] io_bypass_0_bits_uop_iq_type_0 = io_req_bits_uop_iq_type_0; // @[functional-unit.scala:290:7] wire [2:0] brinfo_uop_iq_type = io_req_bits_uop_iq_type_0; // @[functional-unit.scala:290:7, :385:20] wire [9:0] io_bypass_0_bits_uop_fu_code_0 = io_req_bits_uop_fu_code_0; // @[functional-unit.scala:290:7] wire [9:0] brinfo_uop_fu_code = io_req_bits_uop_fu_code_0; // @[functional-unit.scala:290:7, :385:20] wire [3:0] io_bypass_0_bits_uop_ctrl_br_type_0 = io_req_bits_uop_ctrl_br_type_0; // @[functional-unit.scala:290:7] wire [3:0] brinfo_uop_ctrl_br_type = io_req_bits_uop_ctrl_br_type_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_ctrl_op1_sel_0 = io_req_bits_uop_ctrl_op1_sel_0; // @[functional-unit.scala:290:7] wire [1:0] brinfo_uop_ctrl_op1_sel = io_req_bits_uop_ctrl_op1_sel_0; // @[functional-unit.scala:290:7, :385:20] wire [2:0] io_bypass_0_bits_uop_ctrl_op2_sel_0 = io_req_bits_uop_ctrl_op2_sel_0; // @[functional-unit.scala:290:7] wire [2:0] brinfo_uop_ctrl_op2_sel = io_req_bits_uop_ctrl_op2_sel_0; // @[functional-unit.scala:290:7, :385:20] wire [2:0] io_bypass_0_bits_uop_ctrl_imm_sel_0 = io_req_bits_uop_ctrl_imm_sel_0; // @[functional-unit.scala:290:7] wire [2:0] brinfo_uop_ctrl_imm_sel = io_req_bits_uop_ctrl_imm_sel_0; // @[functional-unit.scala:290:7, :385:20] wire [4:0] io_bypass_0_bits_uop_ctrl_op_fcn_0 = io_req_bits_uop_ctrl_op_fcn_0; // @[functional-unit.scala:290:7] wire [4:0] brinfo_uop_ctrl_op_fcn = io_req_bits_uop_ctrl_op_fcn_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_ctrl_fcn_dw_0 = io_req_bits_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:290:7] wire brinfo_uop_ctrl_fcn_dw = io_req_bits_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:290:7, :385:20] wire [2:0] io_bypass_0_bits_uop_ctrl_csr_cmd_0 = io_req_bits_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:290:7] wire [2:0] brinfo_uop_ctrl_csr_cmd = io_req_bits_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_ctrl_is_load_0 = io_req_bits_uop_ctrl_is_load_0; // @[functional-unit.scala:290:7] wire brinfo_uop_ctrl_is_load = io_req_bits_uop_ctrl_is_load_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_ctrl_is_sta_0 = io_req_bits_uop_ctrl_is_sta_0; // @[functional-unit.scala:290:7] wire brinfo_uop_ctrl_is_sta = 
io_req_bits_uop_ctrl_is_sta_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_ctrl_is_std_0 = io_req_bits_uop_ctrl_is_std_0; // @[functional-unit.scala:290:7] wire brinfo_uop_ctrl_is_std = io_req_bits_uop_ctrl_is_std_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_iw_state_0 = io_req_bits_uop_iw_state_0; // @[functional-unit.scala:290:7] wire [1:0] brinfo_uop_iw_state = io_req_bits_uop_iw_state_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_iw_p1_poisoned_0 = io_req_bits_uop_iw_p1_poisoned_0; // @[functional-unit.scala:290:7] wire brinfo_uop_iw_p1_poisoned = io_req_bits_uop_iw_p1_poisoned_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_iw_p2_poisoned_0 = io_req_bits_uop_iw_p2_poisoned_0; // @[functional-unit.scala:290:7] wire brinfo_uop_iw_p2_poisoned = io_req_bits_uop_iw_p2_poisoned_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_br_0 = io_req_bits_uop_is_br_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_br = io_req_bits_uop_is_br_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_jalr_0 = io_req_bits_uop_is_jalr_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_jalr = io_req_bits_uop_is_jalr_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_jal_0 = io_req_bits_uop_is_jal_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_jal = io_req_bits_uop_is_jal_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_sfb_0 = io_req_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_sfb = io_req_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7, :385:20] wire [15:0] io_bypass_0_bits_uop_br_mask_0 = io_req_bits_uop_br_mask_0; // @[functional-unit.scala:290:7] wire [15:0] brinfo_uop_br_mask = io_req_bits_uop_br_mask_0; // @[functional-unit.scala:290:7, :385:20] wire [3:0] io_bypass_0_bits_uop_br_tag_0 = io_req_bits_uop_br_tag_0; // @[functional-unit.scala:290:7] wire [3:0] brinfo_uop_br_tag = io_req_bits_uop_br_tag_0; // @[functional-unit.scala:290:7, :385:20] wire [4:0] io_bypass_0_bits_uop_ftq_idx_0 = io_req_bits_uop_ftq_idx_0; // @[functional-unit.scala:290:7] wire [4:0] brinfo_uop_ftq_idx = io_req_bits_uop_ftq_idx_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_edge_inst_0 = io_req_bits_uop_edge_inst_0; // @[functional-unit.scala:290:7] wire brinfo_uop_edge_inst = io_req_bits_uop_edge_inst_0; // @[functional-unit.scala:290:7, :385:20] wire [5:0] io_bypass_0_bits_uop_pc_lob_0 = io_req_bits_uop_pc_lob_0; // @[functional-unit.scala:290:7] wire [5:0] brinfo_uop_pc_lob = io_req_bits_uop_pc_lob_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_taken_0 = io_req_bits_uop_taken_0; // @[functional-unit.scala:290:7] wire brinfo_uop_taken = io_req_bits_uop_taken_0; // @[functional-unit.scala:290:7, :385:20] wire [19:0] io_bypass_0_bits_uop_imm_packed_0 = io_req_bits_uop_imm_packed_0; // @[functional-unit.scala:290:7] wire [19:0] brinfo_uop_imm_packed = io_req_bits_uop_imm_packed_0; // @[functional-unit.scala:290:7, :385:20] wire [11:0] io_bypass_0_bits_uop_csr_addr_0 = io_req_bits_uop_csr_addr_0; // @[functional-unit.scala:290:7] wire [11:0] brinfo_uop_csr_addr = io_req_bits_uop_csr_addr_0; // @[functional-unit.scala:290:7, :385:20] wire [6:0] io_bypass_0_bits_uop_rob_idx_0 = io_req_bits_uop_rob_idx_0; // @[functional-unit.scala:290:7] wire [6:0] brinfo_uop_rob_idx = io_req_bits_uop_rob_idx_0; // @[functional-unit.scala:290:7, 
:385:20] wire [4:0] io_bypass_0_bits_uop_ldq_idx_0 = io_req_bits_uop_ldq_idx_0; // @[functional-unit.scala:290:7] wire [4:0] brinfo_uop_ldq_idx = io_req_bits_uop_ldq_idx_0; // @[functional-unit.scala:290:7, :385:20] wire [4:0] io_bypass_0_bits_uop_stq_idx_0 = io_req_bits_uop_stq_idx_0; // @[functional-unit.scala:290:7] wire [4:0] brinfo_uop_stq_idx = io_req_bits_uop_stq_idx_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_rxq_idx_0 = io_req_bits_uop_rxq_idx_0; // @[functional-unit.scala:290:7] wire [1:0] brinfo_uop_rxq_idx = io_req_bits_uop_rxq_idx_0; // @[functional-unit.scala:290:7, :385:20] wire [6:0] io_bypass_0_bits_uop_pdst_0 = io_req_bits_uop_pdst_0; // @[functional-unit.scala:290:7] wire [6:0] brinfo_uop_pdst = io_req_bits_uop_pdst_0; // @[functional-unit.scala:290:7, :385:20] wire [6:0] io_bypass_0_bits_uop_prs1_0 = io_req_bits_uop_prs1_0; // @[functional-unit.scala:290:7] wire [6:0] brinfo_uop_prs1 = io_req_bits_uop_prs1_0; // @[functional-unit.scala:290:7, :385:20] wire [6:0] io_bypass_0_bits_uop_prs2_0 = io_req_bits_uop_prs2_0; // @[functional-unit.scala:290:7] wire [6:0] brinfo_uop_prs2 = io_req_bits_uop_prs2_0; // @[functional-unit.scala:290:7, :385:20] wire [6:0] io_bypass_0_bits_uop_prs3_0 = io_req_bits_uop_prs3_0; // @[functional-unit.scala:290:7] wire [6:0] brinfo_uop_prs3 = io_req_bits_uop_prs3_0; // @[functional-unit.scala:290:7, :385:20] wire [4:0] io_bypass_0_bits_uop_ppred_0 = io_req_bits_uop_ppred_0; // @[functional-unit.scala:290:7] wire [4:0] brinfo_uop_ppred = io_req_bits_uop_ppred_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_prs1_busy_0 = io_req_bits_uop_prs1_busy_0; // @[functional-unit.scala:290:7] wire brinfo_uop_prs1_busy = io_req_bits_uop_prs1_busy_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_prs2_busy_0 = io_req_bits_uop_prs2_busy_0; // @[functional-unit.scala:290:7] wire brinfo_uop_prs2_busy = io_req_bits_uop_prs2_busy_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_prs3_busy_0 = io_req_bits_uop_prs3_busy_0; // @[functional-unit.scala:290:7] wire brinfo_uop_prs3_busy = io_req_bits_uop_prs3_busy_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_ppred_busy_0 = io_req_bits_uop_ppred_busy_0; // @[functional-unit.scala:290:7] wire brinfo_uop_ppred_busy = io_req_bits_uop_ppred_busy_0; // @[functional-unit.scala:290:7, :385:20] wire [6:0] io_bypass_0_bits_uop_stale_pdst_0 = io_req_bits_uop_stale_pdst_0; // @[functional-unit.scala:290:7] wire [6:0] brinfo_uop_stale_pdst = io_req_bits_uop_stale_pdst_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_exception_0 = io_req_bits_uop_exception_0; // @[functional-unit.scala:290:7] wire brinfo_uop_exception = io_req_bits_uop_exception_0; // @[functional-unit.scala:290:7, :385:20] wire [63:0] io_bypass_0_bits_uop_exc_cause_0 = io_req_bits_uop_exc_cause_0; // @[functional-unit.scala:290:7] wire [63:0] brinfo_uop_exc_cause = io_req_bits_uop_exc_cause_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_bypassable_0 = io_req_bits_uop_bypassable_0; // @[functional-unit.scala:290:7] wire brinfo_uop_bypassable = io_req_bits_uop_bypassable_0; // @[functional-unit.scala:290:7, :385:20] wire [4:0] io_bypass_0_bits_uop_mem_cmd_0 = io_req_bits_uop_mem_cmd_0; // @[functional-unit.scala:290:7] wire [4:0] brinfo_uop_mem_cmd = io_req_bits_uop_mem_cmd_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_mem_size_0 = io_req_bits_uop_mem_size_0; // 
@[functional-unit.scala:290:7] wire [1:0] brinfo_uop_mem_size = io_req_bits_uop_mem_size_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_mem_signed_0 = io_req_bits_uop_mem_signed_0; // @[functional-unit.scala:290:7] wire brinfo_uop_mem_signed = io_req_bits_uop_mem_signed_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_fence_0 = io_req_bits_uop_is_fence_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_fence = io_req_bits_uop_is_fence_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_fencei_0 = io_req_bits_uop_is_fencei_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_fencei = io_req_bits_uop_is_fencei_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_amo_0 = io_req_bits_uop_is_amo_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_amo = io_req_bits_uop_is_amo_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_uses_ldq_0 = io_req_bits_uop_uses_ldq_0; // @[functional-unit.scala:290:7] wire brinfo_uop_uses_ldq = io_req_bits_uop_uses_ldq_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_uses_stq_0 = io_req_bits_uop_uses_stq_0; // @[functional-unit.scala:290:7] wire brinfo_uop_uses_stq = io_req_bits_uop_uses_stq_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_sys_pc2epc_0 = io_req_bits_uop_is_sys_pc2epc_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_sys_pc2epc = io_req_bits_uop_is_sys_pc2epc_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_is_unique_0 = io_req_bits_uop_is_unique_0; // @[functional-unit.scala:290:7] wire brinfo_uop_is_unique = io_req_bits_uop_is_unique_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_flush_on_commit_0 = io_req_bits_uop_flush_on_commit_0; // @[functional-unit.scala:290:7] wire brinfo_uop_flush_on_commit = io_req_bits_uop_flush_on_commit_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_ldst_is_rs1_0 = io_req_bits_uop_ldst_is_rs1_0; // @[functional-unit.scala:290:7] wire brinfo_uop_ldst_is_rs1 = io_req_bits_uop_ldst_is_rs1_0; // @[functional-unit.scala:290:7, :385:20] wire [5:0] io_bypass_0_bits_uop_ldst_0 = io_req_bits_uop_ldst_0; // @[functional-unit.scala:290:7] wire [5:0] brinfo_uop_ldst = io_req_bits_uop_ldst_0; // @[functional-unit.scala:290:7, :385:20] wire [5:0] io_bypass_0_bits_uop_lrs1_0 = io_req_bits_uop_lrs1_0; // @[functional-unit.scala:290:7] wire [5:0] brinfo_uop_lrs1 = io_req_bits_uop_lrs1_0; // @[functional-unit.scala:290:7, :385:20] wire [5:0] io_bypass_0_bits_uop_lrs2_0 = io_req_bits_uop_lrs2_0; // @[functional-unit.scala:290:7] wire [5:0] brinfo_uop_lrs2 = io_req_bits_uop_lrs2_0; // @[functional-unit.scala:290:7, :385:20] wire [5:0] io_bypass_0_bits_uop_lrs3_0 = io_req_bits_uop_lrs3_0; // @[functional-unit.scala:290:7] wire [5:0] brinfo_uop_lrs3 = io_req_bits_uop_lrs3_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_ldst_val_0 = io_req_bits_uop_ldst_val_0; // @[functional-unit.scala:290:7] wire brinfo_uop_ldst_val = io_req_bits_uop_ldst_val_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_dst_rtype_0 = io_req_bits_uop_dst_rtype_0; // @[functional-unit.scala:290:7] wire [1:0] brinfo_uop_dst_rtype = io_req_bits_uop_dst_rtype_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_lrs1_rtype_0 = io_req_bits_uop_lrs1_rtype_0; // @[functional-unit.scala:290:7] wire [1:0] brinfo_uop_lrs1_rtype = 
io_req_bits_uop_lrs1_rtype_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_lrs2_rtype_0 = io_req_bits_uop_lrs2_rtype_0; // @[functional-unit.scala:290:7] wire [1:0] brinfo_uop_lrs2_rtype = io_req_bits_uop_lrs2_rtype_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_frs3_en_0 = io_req_bits_uop_frs3_en_0; // @[functional-unit.scala:290:7] wire brinfo_uop_frs3_en = io_req_bits_uop_frs3_en_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_fp_val_0 = io_req_bits_uop_fp_val_0; // @[functional-unit.scala:290:7] wire brinfo_uop_fp_val = io_req_bits_uop_fp_val_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_fp_single_0 = io_req_bits_uop_fp_single_0; // @[functional-unit.scala:290:7] wire brinfo_uop_fp_single = io_req_bits_uop_fp_single_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_xcpt_pf_if_0 = io_req_bits_uop_xcpt_pf_if_0; // @[functional-unit.scala:290:7] wire brinfo_uop_xcpt_pf_if = io_req_bits_uop_xcpt_pf_if_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_xcpt_ae_if_0 = io_req_bits_uop_xcpt_ae_if_0; // @[functional-unit.scala:290:7] wire brinfo_uop_xcpt_ae_if = io_req_bits_uop_xcpt_ae_if_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_xcpt_ma_if_0 = io_req_bits_uop_xcpt_ma_if_0; // @[functional-unit.scala:290:7] wire brinfo_uop_xcpt_ma_if = io_req_bits_uop_xcpt_ma_if_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_bp_debug_if_0 = io_req_bits_uop_bp_debug_if_0; // @[functional-unit.scala:290:7] wire brinfo_uop_bp_debug_if = io_req_bits_uop_bp_debug_if_0; // @[functional-unit.scala:290:7, :385:20] wire io_bypass_0_bits_uop_bp_xcpt_if_0 = io_req_bits_uop_bp_xcpt_if_0; // @[functional-unit.scala:290:7] wire brinfo_uop_bp_xcpt_if = io_req_bits_uop_bp_xcpt_if_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_debug_fsrc_0 = io_req_bits_uop_debug_fsrc_0; // @[functional-unit.scala:290:7] wire [1:0] brinfo_uop_debug_fsrc = io_req_bits_uop_debug_fsrc_0; // @[functional-unit.scala:290:7, :385:20] wire [1:0] io_bypass_0_bits_uop_debug_tsrc_0 = io_req_bits_uop_debug_tsrc_0; // @[functional-unit.scala:290:7] wire [1:0] brinfo_uop_debug_tsrc = io_req_bits_uop_debug_tsrc_0; // @[functional-unit.scala:290:7, :385:20] wire [63:0] jalr_target_base = io_req_bits_rs1_data_0; // @[functional-unit.scala:290:7, :416:49] wire _io_resp_valid_T_3; // @[functional-unit.scala:257:47] wire [15:0] _io_resp_bits_uop_br_mask_T_1; // @[util.scala:85:25] wire [63:0] _io_bypass_0_bits_data_T_3; // @[functional-unit.scala:467:32] wire brinfo_valid; // @[functional-unit.scala:385:20] wire brinfo_mispredict; // @[functional-unit.scala:385:20] wire brinfo_taken; // @[functional-unit.scala:385:20] wire [2:0] brinfo_cfi_type; // @[functional-unit.scala:385:20] wire [1:0] brinfo_pc_sel; // @[functional-unit.scala:385:20] wire [39:0] brinfo_jalr_target; // @[functional-unit.scala:385:20] wire [20:0] brinfo_target_offset; // @[functional-unit.scala:385:20] wire _cfi_idx_T = io_get_ftq_pc_entry_start_bank_0; // @[functional-unit.scala:290:7, :422:69] wire [3:0] io_resp_bits_uop_ctrl_br_type_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_ctrl_op1_sel_0; // @[functional-unit.scala:290:7] wire [2:0] io_resp_bits_uop_ctrl_op2_sel_0; // @[functional-unit.scala:290:7] wire [2:0] io_resp_bits_uop_ctrl_imm_sel_0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_uop_ctrl_op_fcn_0; // 
@[functional-unit.scala:290:7] wire io_resp_bits_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:290:7] wire [2:0] io_resp_bits_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_ctrl_is_load_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_ctrl_is_sta_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_ctrl_is_std_0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_uop_uopc_0; // @[functional-unit.scala:290:7] wire [31:0] io_resp_bits_uop_inst_0; // @[functional-unit.scala:290:7] wire [31:0] io_resp_bits_uop_debug_inst_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_rvc_0; // @[functional-unit.scala:290:7] wire [39:0] io_resp_bits_uop_debug_pc_0; // @[functional-unit.scala:290:7] wire [2:0] io_resp_bits_uop_iq_type_0; // @[functional-unit.scala:290:7] wire [9:0] io_resp_bits_uop_fu_code_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_iw_state_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_iw_p1_poisoned_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_iw_p2_poisoned_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_br_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_jalr_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_jal_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7] wire [15:0] io_resp_bits_uop_br_mask_0; // @[functional-unit.scala:290:7] wire [3:0] io_resp_bits_uop_br_tag_0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_uop_ftq_idx_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_edge_inst_0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_uop_pc_lob_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_taken_0; // @[functional-unit.scala:290:7] wire [19:0] io_resp_bits_uop_imm_packed_0; // @[functional-unit.scala:290:7] wire [11:0] io_resp_bits_uop_csr_addr_0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_uop_rob_idx_0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_uop_ldq_idx_0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_uop_stq_idx_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_rxq_idx_0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_uop_pdst_0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_uop_prs1_0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_uop_prs2_0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_uop_prs3_0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_uop_ppred_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_prs1_busy_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_prs2_busy_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_prs3_busy_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_ppred_busy_0; // @[functional-unit.scala:290:7] wire [6:0] io_resp_bits_uop_stale_pdst_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_exception_0; // @[functional-unit.scala:290:7] wire [63:0] io_resp_bits_uop_exc_cause_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_bypassable_0; // @[functional-unit.scala:290:7] wire [4:0] io_resp_bits_uop_mem_cmd_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_mem_size_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_mem_signed_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_fence_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_fencei_0; // @[functional-unit.scala:290:7] wire 
io_resp_bits_uop_is_amo_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_uses_ldq_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_uses_stq_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_sys_pc2epc_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_is_unique_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_flush_on_commit_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_ldst_is_rs1_0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_uop_ldst_0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_uop_lrs1_0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_uop_lrs2_0; // @[functional-unit.scala:290:7] wire [5:0] io_resp_bits_uop_lrs3_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_ldst_val_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_dst_rtype_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_lrs1_rtype_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_lrs2_rtype_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_frs3_en_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_fp_val_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_fp_single_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_xcpt_pf_if_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_xcpt_ae_if_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_xcpt_ma_if_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_bp_debug_if_0; // @[functional-unit.scala:290:7] wire io_resp_bits_uop_bp_xcpt_if_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_debug_fsrc_0; // @[functional-unit.scala:290:7] wire [1:0] io_resp_bits_uop_debug_tsrc_0; // @[functional-unit.scala:290:7] wire [63:0] io_resp_bits_data_0; // @[functional-unit.scala:290:7] wire io_resp_valid_0; // @[functional-unit.scala:290:7] wire [63:0] io_bypass_0_bits_data_0; // @[functional-unit.scala:290:7] wire [3:0] io_brinfo_uop_ctrl_br_type_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_ctrl_op1_sel_0; // @[functional-unit.scala:290:7] wire [2:0] io_brinfo_uop_ctrl_op2_sel_0; // @[functional-unit.scala:290:7] wire [2:0] io_brinfo_uop_ctrl_imm_sel_0; // @[functional-unit.scala:290:7] wire [4:0] io_brinfo_uop_ctrl_op_fcn_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:290:7] wire [2:0] io_brinfo_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_ctrl_is_load_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_ctrl_is_sta_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_ctrl_is_std_0; // @[functional-unit.scala:290:7] wire [6:0] io_brinfo_uop_uopc_0; // @[functional-unit.scala:290:7] wire [31:0] io_brinfo_uop_inst_0; // @[functional-unit.scala:290:7] wire [31:0] io_brinfo_uop_debug_inst_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_rvc_0; // @[functional-unit.scala:290:7] wire [39:0] io_brinfo_uop_debug_pc_0; // @[functional-unit.scala:290:7] wire [2:0] io_brinfo_uop_iq_type_0; // @[functional-unit.scala:290:7] wire [9:0] io_brinfo_uop_fu_code_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_iw_state_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_iw_p1_poisoned_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_iw_p2_poisoned_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_br_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_jalr_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_jal_0; // 
@[functional-unit.scala:290:7] wire io_brinfo_uop_is_sfb_0; // @[functional-unit.scala:290:7] wire [15:0] io_brinfo_uop_br_mask_0; // @[functional-unit.scala:290:7] wire [3:0] io_brinfo_uop_br_tag_0; // @[functional-unit.scala:290:7] wire [4:0] io_brinfo_uop_ftq_idx_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_edge_inst_0; // @[functional-unit.scala:290:7] wire [5:0] io_brinfo_uop_pc_lob_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_taken_0; // @[functional-unit.scala:290:7] wire [19:0] io_brinfo_uop_imm_packed_0; // @[functional-unit.scala:290:7] wire [11:0] io_brinfo_uop_csr_addr_0; // @[functional-unit.scala:290:7] wire [6:0] io_brinfo_uop_rob_idx_0; // @[functional-unit.scala:290:7] wire [4:0] io_brinfo_uop_ldq_idx_0; // @[functional-unit.scala:290:7] wire [4:0] io_brinfo_uop_stq_idx_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_rxq_idx_0; // @[functional-unit.scala:290:7] wire [6:0] io_brinfo_uop_pdst_0; // @[functional-unit.scala:290:7] wire [6:0] io_brinfo_uop_prs1_0; // @[functional-unit.scala:290:7] wire [6:0] io_brinfo_uop_prs2_0; // @[functional-unit.scala:290:7] wire [6:0] io_brinfo_uop_prs3_0; // @[functional-unit.scala:290:7] wire [4:0] io_brinfo_uop_ppred_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_prs1_busy_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_prs2_busy_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_prs3_busy_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_ppred_busy_0; // @[functional-unit.scala:290:7] wire [6:0] io_brinfo_uop_stale_pdst_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_exception_0; // @[functional-unit.scala:290:7] wire [63:0] io_brinfo_uop_exc_cause_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_bypassable_0; // @[functional-unit.scala:290:7] wire [4:0] io_brinfo_uop_mem_cmd_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_mem_size_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_mem_signed_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_fence_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_fencei_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_amo_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_uses_ldq_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_uses_stq_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_sys_pc2epc_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_is_unique_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_flush_on_commit_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_ldst_is_rs1_0; // @[functional-unit.scala:290:7] wire [5:0] io_brinfo_uop_ldst_0; // @[functional-unit.scala:290:7] wire [5:0] io_brinfo_uop_lrs1_0; // @[functional-unit.scala:290:7] wire [5:0] io_brinfo_uop_lrs2_0; // @[functional-unit.scala:290:7] wire [5:0] io_brinfo_uop_lrs3_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_ldst_val_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_dst_rtype_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_lrs1_rtype_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_lrs2_rtype_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_frs3_en_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_fp_val_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_fp_single_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_xcpt_pf_if_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_xcpt_ae_if_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_xcpt_ma_if_0; // 
@[functional-unit.scala:290:7] wire io_brinfo_uop_bp_debug_if_0; // @[functional-unit.scala:290:7] wire io_brinfo_uop_bp_xcpt_if_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_debug_fsrc_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_uop_debug_tsrc_0; // @[functional-unit.scala:290:7] wire io_brinfo_valid_0; // @[functional-unit.scala:290:7] wire io_brinfo_mispredict_0; // @[functional-unit.scala:290:7] wire io_brinfo_taken_0; // @[functional-unit.scala:290:7] wire [2:0] io_brinfo_cfi_type_0; // @[functional-unit.scala:290:7] wire [1:0] io_brinfo_pc_sel_0; // @[functional-unit.scala:290:7] wire [39:0] io_brinfo_jalr_target_0; // @[functional-unit.scala:290:7] wire [20:0] io_brinfo_target_offset_0; // @[functional-unit.scala:290:7] reg r_valids_0; // @[functional-unit.scala:236:27] reg [6:0] r_uops_0_uopc; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_uopc_0 = r_uops_0_uopc; // @[functional-unit.scala:237:23, :290:7] reg [31:0] r_uops_0_inst; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_inst_0 = r_uops_0_inst; // @[functional-unit.scala:237:23, :290:7] reg [31:0] r_uops_0_debug_inst; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_debug_inst_0 = r_uops_0_debug_inst; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_rvc; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_rvc_0 = r_uops_0_is_rvc; // @[functional-unit.scala:237:23, :290:7] reg [39:0] r_uops_0_debug_pc; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_debug_pc_0 = r_uops_0_debug_pc; // @[functional-unit.scala:237:23, :290:7] reg [2:0] r_uops_0_iq_type; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_iq_type_0 = r_uops_0_iq_type; // @[functional-unit.scala:237:23, :290:7] reg [9:0] r_uops_0_fu_code; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_fu_code_0 = r_uops_0_fu_code; // @[functional-unit.scala:237:23, :290:7] reg [3:0] r_uops_0_ctrl_br_type; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_br_type_0 = r_uops_0_ctrl_br_type; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_ctrl_op1_sel; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_op1_sel_0 = r_uops_0_ctrl_op1_sel; // @[functional-unit.scala:237:23, :290:7] reg [2:0] r_uops_0_ctrl_op2_sel; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_op2_sel_0 = r_uops_0_ctrl_op2_sel; // @[functional-unit.scala:237:23, :290:7] reg [2:0] r_uops_0_ctrl_imm_sel; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_imm_sel_0 = r_uops_0_ctrl_imm_sel; // @[functional-unit.scala:237:23, :290:7] reg [4:0] r_uops_0_ctrl_op_fcn; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_op_fcn_0 = r_uops_0_ctrl_op_fcn; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_ctrl_fcn_dw; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_fcn_dw_0 = r_uops_0_ctrl_fcn_dw; // @[functional-unit.scala:237:23, :290:7] reg [2:0] r_uops_0_ctrl_csr_cmd; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_csr_cmd_0 = r_uops_0_ctrl_csr_cmd; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_ctrl_is_load; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_is_load_0 = r_uops_0_ctrl_is_load; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_ctrl_is_sta; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ctrl_is_sta_0 = r_uops_0_ctrl_is_sta; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_ctrl_is_std; // @[functional-unit.scala:237:23] assign 
io_resp_bits_uop_ctrl_is_std_0 = r_uops_0_ctrl_is_std; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_iw_state; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_iw_state_0 = r_uops_0_iw_state; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_iw_p1_poisoned; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_iw_p1_poisoned_0 = r_uops_0_iw_p1_poisoned; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_iw_p2_poisoned; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_iw_p2_poisoned_0 = r_uops_0_iw_p2_poisoned; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_br; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_br_0 = r_uops_0_is_br; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_jalr; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_jalr_0 = r_uops_0_is_jalr; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_jal; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_jal_0 = r_uops_0_is_jal; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_sfb; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_sfb_0 = r_uops_0_is_sfb; // @[functional-unit.scala:237:23, :290:7] reg [15:0] r_uops_0_br_mask; // @[functional-unit.scala:237:23] reg [3:0] r_uops_0_br_tag; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_br_tag_0 = r_uops_0_br_tag; // @[functional-unit.scala:237:23, :290:7] reg [4:0] r_uops_0_ftq_idx; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ftq_idx_0 = r_uops_0_ftq_idx; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_edge_inst; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_edge_inst_0 = r_uops_0_edge_inst; // @[functional-unit.scala:237:23, :290:7] reg [5:0] r_uops_0_pc_lob; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_pc_lob_0 = r_uops_0_pc_lob; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_taken; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_taken_0 = r_uops_0_taken; // @[functional-unit.scala:237:23, :290:7] reg [19:0] r_uops_0_imm_packed; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_imm_packed_0 = r_uops_0_imm_packed; // @[functional-unit.scala:237:23, :290:7] reg [11:0] r_uops_0_csr_addr; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_csr_addr_0 = r_uops_0_csr_addr; // @[functional-unit.scala:237:23, :290:7] reg [6:0] r_uops_0_rob_idx; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_rob_idx_0 = r_uops_0_rob_idx; // @[functional-unit.scala:237:23, :290:7] reg [4:0] r_uops_0_ldq_idx; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ldq_idx_0 = r_uops_0_ldq_idx; // @[functional-unit.scala:237:23, :290:7] reg [4:0] r_uops_0_stq_idx; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_stq_idx_0 = r_uops_0_stq_idx; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_rxq_idx; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_rxq_idx_0 = r_uops_0_rxq_idx; // @[functional-unit.scala:237:23, :290:7] reg [6:0] r_uops_0_pdst; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_pdst_0 = r_uops_0_pdst; // @[functional-unit.scala:237:23, :290:7] reg [6:0] r_uops_0_prs1; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_prs1_0 = r_uops_0_prs1; // @[functional-unit.scala:237:23, :290:7] reg [6:0] r_uops_0_prs2; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_prs2_0 = r_uops_0_prs2; // @[functional-unit.scala:237:23, :290:7] reg [6:0] r_uops_0_prs3; // @[functional-unit.scala:237:23] assign 
io_resp_bits_uop_prs3_0 = r_uops_0_prs3; // @[functional-unit.scala:237:23, :290:7] reg [4:0] r_uops_0_ppred; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ppred_0 = r_uops_0_ppred; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_prs1_busy; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_prs1_busy_0 = r_uops_0_prs1_busy; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_prs2_busy; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_prs2_busy_0 = r_uops_0_prs2_busy; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_prs3_busy; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_prs3_busy_0 = r_uops_0_prs3_busy; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_ppred_busy; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ppred_busy_0 = r_uops_0_ppred_busy; // @[functional-unit.scala:237:23, :290:7] reg [6:0] r_uops_0_stale_pdst; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_stale_pdst_0 = r_uops_0_stale_pdst; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_exception; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_exception_0 = r_uops_0_exception; // @[functional-unit.scala:237:23, :290:7] reg [63:0] r_uops_0_exc_cause; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_exc_cause_0 = r_uops_0_exc_cause; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_bypassable; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_bypassable_0 = r_uops_0_bypassable; // @[functional-unit.scala:237:23, :290:7] reg [4:0] r_uops_0_mem_cmd; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_mem_cmd_0 = r_uops_0_mem_cmd; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_mem_size; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_mem_size_0 = r_uops_0_mem_size; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_mem_signed; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_mem_signed_0 = r_uops_0_mem_signed; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_fence; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_fence_0 = r_uops_0_is_fence; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_fencei; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_fencei_0 = r_uops_0_is_fencei; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_amo; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_amo_0 = r_uops_0_is_amo; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_uses_ldq; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_uses_ldq_0 = r_uops_0_uses_ldq; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_uses_stq; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_uses_stq_0 = r_uops_0_uses_stq; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_sys_pc2epc; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_sys_pc2epc_0 = r_uops_0_is_sys_pc2epc; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_is_unique; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_is_unique_0 = r_uops_0_is_unique; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_flush_on_commit; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_flush_on_commit_0 = r_uops_0_flush_on_commit; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_ldst_is_rs1; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ldst_is_rs1_0 = r_uops_0_ldst_is_rs1; // @[functional-unit.scala:237:23, :290:7] reg [5:0] r_uops_0_ldst; // @[functional-unit.scala:237:23] assign 
io_resp_bits_uop_ldst_0 = r_uops_0_ldst; // @[functional-unit.scala:237:23, :290:7] reg [5:0] r_uops_0_lrs1; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_lrs1_0 = r_uops_0_lrs1; // @[functional-unit.scala:237:23, :290:7] reg [5:0] r_uops_0_lrs2; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_lrs2_0 = r_uops_0_lrs2; // @[functional-unit.scala:237:23, :290:7] reg [5:0] r_uops_0_lrs3; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_lrs3_0 = r_uops_0_lrs3; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_ldst_val; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_ldst_val_0 = r_uops_0_ldst_val; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_dst_rtype; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_dst_rtype_0 = r_uops_0_dst_rtype; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_lrs1_rtype; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_lrs1_rtype_0 = r_uops_0_lrs1_rtype; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_lrs2_rtype; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_lrs2_rtype_0 = r_uops_0_lrs2_rtype; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_frs3_en; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_frs3_en_0 = r_uops_0_frs3_en; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_fp_val; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_fp_val_0 = r_uops_0_fp_val; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_fp_single; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_fp_single_0 = r_uops_0_fp_single; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_xcpt_pf_if; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_xcpt_pf_if_0 = r_uops_0_xcpt_pf_if; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_xcpt_ae_if; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_xcpt_ae_if_0 = r_uops_0_xcpt_ae_if; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_xcpt_ma_if; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_xcpt_ma_if_0 = r_uops_0_xcpt_ma_if; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_bp_debug_if; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_bp_debug_if_0 = r_uops_0_bp_debug_if; // @[functional-unit.scala:237:23, :290:7] reg r_uops_0_bp_xcpt_if; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_bp_xcpt_if_0 = r_uops_0_bp_xcpt_if; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_debug_fsrc; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_debug_fsrc_0 = r_uops_0_debug_fsrc; // @[functional-unit.scala:237:23, :290:7] reg [1:0] r_uops_0_debug_tsrc; // @[functional-unit.scala:237:23] assign io_resp_bits_uop_debug_tsrc_0 = r_uops_0_debug_tsrc; // @[functional-unit.scala:237:23, :290:7] wire [15:0] _r_valids_0_T = io_brupdate_b1_mispredict_mask_0 & io_req_bits_uop_br_mask_0; // @[util.scala:118:51] wire _r_valids_0_T_1 = |_r_valids_0_T; // @[util.scala:118:{51,59}] wire _r_valids_0_T_2 = ~_r_valids_0_T_1; // @[util.scala:118:59] wire _r_valids_0_T_3 = io_req_valid_0 & _r_valids_0_T_2; // @[functional-unit.scala:240:{33,36}, :290:7] wire _r_valids_0_T_4 = ~io_req_bits_kill_0; // @[functional-unit.scala:240:87, :290:7] wire _r_valids_0_T_5 = _r_valids_0_T_3 & _r_valids_0_T_4; // @[functional-unit.scala:240:{33,84,87}] wire [15:0] _r_uops_0_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:85:27] wire [15:0] _r_uops_0_br_mask_T_1 = io_req_bits_uop_br_mask_0 & _r_uops_0_br_mask_T; // 
@[util.scala:85:{25,27}] wire [15:0] _io_resp_valid_T = io_brupdate_b1_mispredict_mask_0 & r_uops_0_br_mask; // @[util.scala:118:51] wire _io_resp_valid_T_1 = |_io_resp_valid_T; // @[util.scala:118:{51,59}] wire _io_resp_valid_T_2 = ~_io_resp_valid_T_1; // @[util.scala:118:59] assign _io_resp_valid_T_3 = r_valids_0 & _io_resp_valid_T_2; // @[functional-unit.scala:236:27, :257:{47,50}] assign io_resp_valid_0 = _io_resp_valid_T_3; // @[functional-unit.scala:257:47, :290:7] wire [15:0] _io_resp_bits_uop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:85:27] assign _io_resp_bits_uop_br_mask_T_1 = r_uops_0_br_mask & _io_resp_bits_uop_br_mask_T; // @[util.scala:85:{25,27}] assign io_resp_bits_uop_br_mask_0 = _io_resp_bits_uop_br_mask_T_1; // @[util.scala:85:25] wire _imm_xprlen_sign_T = io_req_bits_uop_imm_packed_0[19]; // @[util.scala:273:18] wire imm_xprlen_sign = _imm_xprlen_sign_T; // @[util.scala:273:{18,37}] wire imm_xprlen_hi_hi_hi = imm_xprlen_sign; // @[util.scala:273:37, :282:15] wire _GEN = io_req_bits_uop_ctrl_imm_sel_0 == 3'h3; // @[util.scala:274:27] wire _imm_xprlen_i30_20_T; // @[util.scala:274:27] assign _imm_xprlen_i30_20_T = _GEN; // @[util.scala:274:27] wire _imm_xprlen_i19_12_T; // @[util.scala:275:27] assign _imm_xprlen_i19_12_T = _GEN; // @[util.scala:274:27, :275:27] wire _imm_xprlen_i11_T; // @[util.scala:276:27] assign _imm_xprlen_i11_T = _GEN; // @[util.scala:274:27, :276:27] wire _imm_xprlen_i10_5_T; // @[util.scala:278:27] assign _imm_xprlen_i10_5_T = _GEN; // @[util.scala:274:27, :278:27] wire _imm_xprlen_i4_1_T; // @[util.scala:279:27] assign _imm_xprlen_i4_1_T = _GEN; // @[util.scala:274:27, :279:27] wire [10:0] _imm_xprlen_i30_20_T_1 = io_req_bits_uop_imm_packed_0[18:8]; // @[util.scala:274:39] wire [10:0] _imm_xprlen_i30_20_T_2 = _imm_xprlen_i30_20_T_1; // @[util.scala:274:{39,46}] wire [10:0] imm_xprlen_i30_20 = _imm_xprlen_i30_20_T ? _imm_xprlen_i30_20_T_2 : {11{imm_xprlen_sign}}; // @[util.scala:273:37, :274:{21,27,46}] wire [10:0] imm_xprlen_hi_hi_lo = imm_xprlen_i30_20; // @[util.scala:274:21, :282:15] wire _GEN_0 = io_req_bits_uop_ctrl_imm_sel_0 == 3'h4; // @[util.scala:275:44] wire _imm_xprlen_i19_12_T_1; // @[util.scala:275:44] assign _imm_xprlen_i19_12_T_1 = _GEN_0; // @[util.scala:275:44] wire _imm_xprlen_i11_T_1; // @[util.scala:277:27] assign _imm_xprlen_i11_T_1 = _GEN_0; // @[util.scala:275:44, :277:27] wire _imm_xprlen_i19_12_T_2 = _imm_xprlen_i19_12_T | _imm_xprlen_i19_12_T_1; // @[util.scala:275:{27,36,44}] wire [7:0] _imm_xprlen_i19_12_T_3 = io_req_bits_uop_imm_packed_0[7:0]; // @[util.scala:275:56] wire [7:0] _imm_xprlen_i19_12_T_4 = _imm_xprlen_i19_12_T_3; // @[util.scala:275:{56,62}] wire [7:0] imm_xprlen_i19_12 = _imm_xprlen_i19_12_T_2 ? _imm_xprlen_i19_12_T_4 : {8{imm_xprlen_sign}}; // @[util.scala:273:37, :275:{21,36,62}] wire [7:0] imm_xprlen_hi_lo_hi = imm_xprlen_i19_12; // @[util.scala:275:21, :282:15] wire _imm_xprlen_i11_T_2 = io_req_bits_uop_ctrl_imm_sel_0 == 3'h2; // @[util.scala:277:44] wire _imm_xprlen_i11_T_3 = _imm_xprlen_i11_T_1 | _imm_xprlen_i11_T_2; // @[util.scala:277:{27,36,44}] wire _imm_xprlen_i11_T_4 = io_req_bits_uop_imm_packed_0[8]; // @[util.scala:277:56] wire _imm_xprlen_i0_T_3 = io_req_bits_uop_imm_packed_0[8]; // @[util.scala:277:56, :280:56] wire _imm_xprlen_i11_T_5 = _imm_xprlen_i11_T_4; // @[util.scala:277:{56,60}] wire _imm_xprlen_i11_T_6 = _imm_xprlen_i11_T_3 ? 
_imm_xprlen_i11_T_5 : imm_xprlen_sign; // @[util.scala:273:37, :277:{21,36,60}] wire imm_xprlen_i11 = ~_imm_xprlen_i11_T & _imm_xprlen_i11_T_6; // @[util.scala:276:{21,27}, :277:21] wire imm_xprlen_hi_lo_lo = imm_xprlen_i11; // @[util.scala:276:21, :282:15] wire [4:0] _imm_xprlen_i10_5_T_1 = io_req_bits_uop_imm_packed_0[18:14]; // @[util.scala:278:44] wire [4:0] _imm_xprlen_i10_5_T_2 = _imm_xprlen_i10_5_T_1; // @[util.scala:278:{44,52}] wire [4:0] imm_xprlen_i10_5 = _imm_xprlen_i10_5_T ? 5'h0 : _imm_xprlen_i10_5_T_2; // @[util.scala:278:{21,27,52}] wire [4:0] imm_xprlen_lo_hi_hi = imm_xprlen_i10_5; // @[util.scala:278:21, :282:15] wire [4:0] _imm_xprlen_i4_1_T_1 = io_req_bits_uop_imm_packed_0[13:9]; // @[util.scala:279:44] wire [4:0] _imm_xprlen_i4_1_T_2 = _imm_xprlen_i4_1_T_1; // @[util.scala:279:{44,51}] wire [4:0] imm_xprlen_i4_1 = _imm_xprlen_i4_1_T ? 5'h0 : _imm_xprlen_i4_1_T_2; // @[util.scala:279:{21,27,51}] wire [4:0] imm_xprlen_lo_hi_lo = imm_xprlen_i4_1; // @[util.scala:279:21, :282:15] wire _imm_xprlen_i0_T = io_req_bits_uop_ctrl_imm_sel_0 == 3'h1; // @[util.scala:280:27] wire _imm_xprlen_i0_T_1 = io_req_bits_uop_ctrl_imm_sel_0 == 3'h0; // @[util.scala:280:44] wire _imm_xprlen_i0_T_2 = _imm_xprlen_i0_T | _imm_xprlen_i0_T_1; // @[util.scala:280:{27,36,44}] wire _imm_xprlen_i0_T_4 = _imm_xprlen_i0_T_3; // @[util.scala:280:{56,60}] wire imm_xprlen_i0 = _imm_xprlen_i0_T_2 & _imm_xprlen_i0_T_4; // @[util.scala:280:{21,36,60}] wire imm_xprlen_lo_lo = imm_xprlen_i0; // @[util.scala:280:21, :282:15] wire [9:0] imm_xprlen_lo_hi = {imm_xprlen_lo_hi_hi, imm_xprlen_lo_hi_lo}; // @[util.scala:282:15] wire [10:0] imm_xprlen_lo = {imm_xprlen_lo_hi, imm_xprlen_lo_lo}; // @[util.scala:282:15] wire [8:0] imm_xprlen_hi_lo = {imm_xprlen_hi_lo_hi, imm_xprlen_hi_lo_lo}; // @[util.scala:282:15] wire [11:0] imm_xprlen_hi_hi = {imm_xprlen_hi_hi_hi, imm_xprlen_hi_hi_lo}; // @[util.scala:282:15] wire [20:0] imm_xprlen_hi = {imm_xprlen_hi_hi, imm_xprlen_hi_lo}; // @[util.scala:282:15] wire [31:0] _imm_xprlen_T = {imm_xprlen_hi, imm_xprlen_lo}; // @[util.scala:282:15] wire [31:0] imm_xprlen = _imm_xprlen_T; // @[util.scala:282:{15,60}] wire [31:0] _op2_data_T_1 = imm_xprlen; // @[util.scala:282:60] wire [39:0] _block_pc_T = ~io_get_ftq_pc_pc_0; // @[util.scala:237:7] wire [39:0] _block_pc_T_1 = {_block_pc_T[39:6], 6'h3F}; // @[util.scala:237:{7,11}] wire [39:0] block_pc = ~_block_pc_T_1; // @[util.scala:237:{5,11}] wire [39:0] _uop_pc_T = {block_pc[39:6], block_pc[5:0] | io_req_bits_uop_pc_lob_0}; // @[util.scala:237:5] wire [1:0] _uop_pc_T_1 = {io_req_bits_uop_edge_inst_0, 1'h0}; // @[functional-unit.scala:290:7, :310:47] wire [40:0] _uop_pc_T_2 = {1'h0, _uop_pc_T} - {39'h0, _uop_pc_T_1}; // @[functional-unit.scala:310:{28,42,47}] wire [39:0] uop_pc = _uop_pc_T_2[39:0]; // @[functional-unit.scala:310:42] wire _op2_data_T = io_req_bits_uop_ctrl_op2_sel_0 == 3'h1; // @[functional-unit.scala:290:7, :321:39] wire _op2_data_T_2 = _op2_data_T_1[31]; // @[util.scala:261:46] wire [31:0] _op2_data_T_3 = {32{_op2_data_T_2}}; // @[util.scala:261:{25,46}] wire [63:0] _op2_data_T_4 = {_op2_data_T_3, _op2_data_T_1}; // @[util.scala:261:{20,25}] wire _op2_data_T_5 = io_req_bits_uop_ctrl_op2_sel_0 == 3'h4; // @[functional-unit.scala:290:7, :322:39] wire [4:0] _op2_data_T_6 = io_req_bits_uop_prs1_0[4:0]; // @[functional-unit.scala:290:7, :322:73] wire _op2_data_T_7 = io_req_bits_uop_ctrl_op2_sel_0 == 3'h0; // @[functional-unit.scala:290:7, :323:39] wire _op2_data_T_8 = io_req_bits_uop_ctrl_op2_sel_0 == 3'h3; // 
@[functional-unit.scala:290:7, :324:39] wire [2:0] _op2_data_T_9 = io_req_bits_uop_is_rvc_0 ? 3'h2 : 3'h4; // @[functional-unit.scala:290:7, :324:56] wire [2:0] _op2_data_T_10 = _op2_data_T_8 ? _op2_data_T_9 : 3'h0; // @[functional-unit.scala:324:{21,39,56}] wire [63:0] _op2_data_T_11 = _op2_data_T_7 ? io_req_bits_rs2_data_0 : {61'h0, _op2_data_T_10}; // @[functional-unit.scala:290:7, :323:{21,39}, :324:21] wire [63:0] _op2_data_T_12 = _op2_data_T_5 ? {59'h0, _op2_data_T_6} : _op2_data_T_11; // @[functional-unit.scala:322:{21,39,73}, :323:21] wire [63:0] op2_data = _op2_data_T ? _op2_data_T_4 : _op2_data_T_12; // @[util.scala:261:20] wire killed; // @[functional-unit.scala:337:24] assign killed = |{io_req_bits_kill_0, _r_valids_0_T}; // @[util.scala:118:{51,59}] wire br_eq = io_req_bits_rs1_data_0 == io_req_bits_rs2_data_0; // @[functional-unit.scala:290:7, :344:21] wire br_ltu = io_req_bits_rs1_data_0 < io_req_bits_rs2_data_0; // @[functional-unit.scala:290:7, :345:28] wire _br_lt_T = io_req_bits_rs1_data_0[63]; // @[functional-unit.scala:290:7, :346:22] wire _br_lt_T_5 = io_req_bits_rs1_data_0[63]; // @[functional-unit.scala:290:7, :346:22, :347:20] wire _br_lt_T_1 = io_req_bits_rs2_data_0[63]; // @[functional-unit.scala:290:7, :346:36] wire _br_lt_T_6 = io_req_bits_rs2_data_0[63]; // @[functional-unit.scala:290:7, :346:36, :347:35] wire _br_lt_T_2 = _br_lt_T ^ _br_lt_T_1; // @[functional-unit.scala:346:{22,31,36}] wire _br_lt_T_3 = ~_br_lt_T_2; // @[functional-unit.scala:346:{17,31}] wire _br_lt_T_4 = _br_lt_T_3 & br_ltu; // @[functional-unit.scala:345:28, :346:{17,46}] wire _br_lt_T_7 = ~_br_lt_T_6; // @[functional-unit.scala:347:{31,35}] wire _br_lt_T_8 = _br_lt_T_5 & _br_lt_T_7; // @[functional-unit.scala:347:{20,29,31}] wire br_lt = _br_lt_T_4 | _br_lt_T_8; // @[functional-unit.scala:346:{46,55}, :347:29] wire _pc_sel_T = ~br_eq; // @[functional-unit.scala:344:21, :351:39] wire [1:0] _pc_sel_T_1 = {1'h0, _pc_sel_T}; // @[functional-unit.scala:351:{38,39}] wire [1:0] _pc_sel_T_2 = {1'h0, br_eq}; // @[functional-unit.scala:344:21, :352:38] wire _pc_sel_T_3 = ~br_lt; // @[functional-unit.scala:346:55, :353:39] wire [1:0] _pc_sel_T_4 = {1'h0, _pc_sel_T_3}; // @[functional-unit.scala:353:{38,39}] wire _pc_sel_T_5 = ~br_ltu; // @[functional-unit.scala:345:28, :354:39] wire [1:0] _pc_sel_T_6 = {1'h0, _pc_sel_T_5}; // @[functional-unit.scala:354:{38,39}] wire [1:0] _pc_sel_T_7 = {1'h0, br_lt}; // @[functional-unit.scala:346:55, :355:38] wire [1:0] _pc_sel_T_8 = {1'h0, br_ltu}; // @[functional-unit.scala:345:28, :356:38] wire _pc_sel_T_9 = io_req_bits_uop_ctrl_br_type_0 == 4'h0; // @[functional-unit.scala:290:7, :349:53] wire _pc_sel_T_11 = io_req_bits_uop_ctrl_br_type_0 == 4'h1; // @[functional-unit.scala:290:7, :349:53] wire [1:0] _pc_sel_T_12 = _pc_sel_T_11 ? _pc_sel_T_1 : 2'h0; // @[functional-unit.scala:349:53, :351:38] wire _pc_sel_T_13 = io_req_bits_uop_ctrl_br_type_0 == 4'h2; // @[functional-unit.scala:290:7, :349:53] wire [1:0] _pc_sel_T_14 = _pc_sel_T_13 ? _pc_sel_T_2 : _pc_sel_T_12; // @[functional-unit.scala:349:53, :352:38] wire _pc_sel_T_15 = io_req_bits_uop_ctrl_br_type_0 == 4'h3; // @[functional-unit.scala:290:7, :349:53] wire [1:0] _pc_sel_T_16 = _pc_sel_T_15 ? _pc_sel_T_4 : _pc_sel_T_14; // @[functional-unit.scala:349:53, :353:38] wire _pc_sel_T_17 = io_req_bits_uop_ctrl_br_type_0 == 4'h4; // @[functional-unit.scala:290:7, :349:53] wire [1:0] _pc_sel_T_18 = _pc_sel_T_17 ? 
_pc_sel_T_6 : _pc_sel_T_16; // @[functional-unit.scala:349:53, :354:38] wire _pc_sel_T_19 = io_req_bits_uop_ctrl_br_type_0 == 4'h5; // @[functional-unit.scala:290:7, :349:53] wire [1:0] _pc_sel_T_20 = _pc_sel_T_19 ? _pc_sel_T_7 : _pc_sel_T_18; // @[functional-unit.scala:349:53, :355:38] wire _pc_sel_T_21 = io_req_bits_uop_ctrl_br_type_0 == 4'h6; // @[functional-unit.scala:290:7, :349:53] wire [1:0] _pc_sel_T_22 = _pc_sel_T_21 ? _pc_sel_T_8 : _pc_sel_T_20; // @[functional-unit.scala:349:53, :356:38] wire _pc_sel_T_23 = io_req_bits_uop_ctrl_br_type_0 == 4'h7; // @[functional-unit.scala:290:7, :349:53] wire [1:0] _pc_sel_T_24 = _pc_sel_T_23 ? 2'h1 : _pc_sel_T_22; // @[functional-unit.scala:349:53] wire _pc_sel_T_25 = io_req_bits_uop_ctrl_br_type_0 == 4'h8; // @[functional-unit.scala:290:7, :349:53] wire [1:0] pc_sel = _pc_sel_T_25 ? 2'h2 : _pc_sel_T_24; // @[functional-unit.scala:349:53] assign brinfo_pc_sel = pc_sel; // @[functional-unit.scala:349:53, :385:20] wire _is_taken_T = ~killed; // @[functional-unit.scala:337:24, :362:20] wire _is_taken_T_1 = io_req_valid_0 & _is_taken_T; // @[functional-unit.scala:290:7, :361:31, :362:20] wire _is_taken_T_2 = io_req_bits_uop_is_br_0 | io_req_bits_uop_is_jalr_0; // @[functional-unit.scala:290:7, :363:31] wire _is_taken_T_3 = _is_taken_T_2 | io_req_bits_uop_is_jal_0; // @[functional-unit.scala:290:7, :363:{31,46}] wire _is_taken_T_4 = _is_taken_T_1 & _is_taken_T_3; // @[functional-unit.scala:361:31, :362:28, :363:46] wire _is_taken_T_5 = |pc_sel; // @[functional-unit.scala:349:53, :364:28] wire is_taken = _is_taken_T_4 & _is_taken_T_5; // @[functional-unit.scala:362:28, :363:61, :364:28] assign brinfo_taken = is_taken; // @[functional-unit.scala:363:61, :385:20] wire mispredict; // @[functional-unit.scala:367:28] assign brinfo_mispredict = mispredict; // @[functional-unit.scala:367:28, :385:20] wire _is_br_T = ~killed; // @[functional-unit.scala:337:24, :362:20, :369:40] wire _is_br_T_1 = io_req_valid_0 & _is_br_T; // @[functional-unit.scala:290:7, :369:{37,40}] wire _is_br_T_2 = _is_br_T_1 & io_req_bits_uop_is_br_0; // @[functional-unit.scala:290:7, :369:{37,48}] wire _is_br_T_3 = ~io_req_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7, :369:64] wire is_br = _is_br_T_2 & _is_br_T_3; // @[functional-unit.scala:369:{48,61,64}] wire _is_jal_T = ~killed; // @[functional-unit.scala:337:24, :362:20, :370:40] wire _is_jal_T_1 = io_req_valid_0 & _is_jal_T; // @[functional-unit.scala:290:7, :370:{37,40}] wire is_jal = _is_jal_T_1 & io_req_bits_uop_is_jal_0; // @[functional-unit.scala:290:7, :370:{37,48}] wire _is_jalr_T = ~killed; // @[functional-unit.scala:337:24, :362:20, :371:40] wire _is_jalr_T_1 = io_req_valid_0 & _is_jalr_T; // @[functional-unit.scala:290:7, :371:{37,40}] wire is_jalr = _is_jalr_T_1 & io_req_bits_uop_is_jalr_0; // @[functional-unit.scala:290:7, :371:{37,48}] wire _brinfo_valid_T = is_br | is_jalr; // @[functional-unit.scala:369:61, :371:48, :373:15, :388:34] wire _mispredict_T = ~io_req_bits_uop_taken_0; // @[functional-unit.scala:290:7, :381:21] assign io_brinfo_uop_uopc_0 = brinfo_uop_uopc; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_inst_0 = brinfo_uop_inst; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_debug_inst_0 = brinfo_uop_debug_inst; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_rvc_0 = brinfo_uop_is_rvc; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_debug_pc_0 = brinfo_uop_debug_pc; // @[functional-unit.scala:290:7, :385:20] assign 
io_brinfo_uop_iq_type_0 = brinfo_uop_iq_type; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_fu_code_0 = brinfo_uop_fu_code; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_br_type_0 = brinfo_uop_ctrl_br_type; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_op1_sel_0 = brinfo_uop_ctrl_op1_sel; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_op2_sel_0 = brinfo_uop_ctrl_op2_sel; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_imm_sel_0 = brinfo_uop_ctrl_imm_sel; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_op_fcn_0 = brinfo_uop_ctrl_op_fcn; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_fcn_dw_0 = brinfo_uop_ctrl_fcn_dw; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_csr_cmd_0 = brinfo_uop_ctrl_csr_cmd; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_is_load_0 = brinfo_uop_ctrl_is_load; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_is_sta_0 = brinfo_uop_ctrl_is_sta; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ctrl_is_std_0 = brinfo_uop_ctrl_is_std; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_iw_state_0 = brinfo_uop_iw_state; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_iw_p1_poisoned_0 = brinfo_uop_iw_p1_poisoned; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_iw_p2_poisoned_0 = brinfo_uop_iw_p2_poisoned; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_br_0 = brinfo_uop_is_br; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_jalr_0 = brinfo_uop_is_jalr; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_jal_0 = brinfo_uop_is_jal; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_sfb_0 = brinfo_uop_is_sfb; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_br_mask_0 = brinfo_uop_br_mask; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_br_tag_0 = brinfo_uop_br_tag; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ftq_idx_0 = brinfo_uop_ftq_idx; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_edge_inst_0 = brinfo_uop_edge_inst; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_pc_lob_0 = brinfo_uop_pc_lob; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_taken_0 = brinfo_uop_taken; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_imm_packed_0 = brinfo_uop_imm_packed; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_csr_addr_0 = brinfo_uop_csr_addr; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_rob_idx_0 = brinfo_uop_rob_idx; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ldq_idx_0 = brinfo_uop_ldq_idx; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_stq_idx_0 = brinfo_uop_stq_idx; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_rxq_idx_0 = brinfo_uop_rxq_idx; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_pdst_0 = brinfo_uop_pdst; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_prs1_0 = brinfo_uop_prs1; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_prs2_0 = brinfo_uop_prs2; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_prs3_0 = brinfo_uop_prs3; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ppred_0 = brinfo_uop_ppred; // @[functional-unit.scala:290:7, :385:20] assign 
io_brinfo_uop_prs1_busy_0 = brinfo_uop_prs1_busy; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_prs2_busy_0 = brinfo_uop_prs2_busy; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_prs3_busy_0 = brinfo_uop_prs3_busy; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ppred_busy_0 = brinfo_uop_ppred_busy; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_stale_pdst_0 = brinfo_uop_stale_pdst; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_exception_0 = brinfo_uop_exception; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_exc_cause_0 = brinfo_uop_exc_cause; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_bypassable_0 = brinfo_uop_bypassable; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_mem_cmd_0 = brinfo_uop_mem_cmd; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_mem_size_0 = brinfo_uop_mem_size; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_mem_signed_0 = brinfo_uop_mem_signed; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_fence_0 = brinfo_uop_is_fence; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_fencei_0 = brinfo_uop_is_fencei; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_amo_0 = brinfo_uop_is_amo; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_uses_ldq_0 = brinfo_uop_uses_ldq; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_uses_stq_0 = brinfo_uop_uses_stq; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_sys_pc2epc_0 = brinfo_uop_is_sys_pc2epc; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_is_unique_0 = brinfo_uop_is_unique; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_flush_on_commit_0 = brinfo_uop_flush_on_commit; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ldst_is_rs1_0 = brinfo_uop_ldst_is_rs1; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ldst_0 = brinfo_uop_ldst; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_lrs1_0 = brinfo_uop_lrs1; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_lrs2_0 = brinfo_uop_lrs2; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_lrs3_0 = brinfo_uop_lrs3; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_ldst_val_0 = brinfo_uop_ldst_val; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_dst_rtype_0 = brinfo_uop_dst_rtype; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_lrs1_rtype_0 = brinfo_uop_lrs1_rtype; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_lrs2_rtype_0 = brinfo_uop_lrs2_rtype; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_frs3_en_0 = brinfo_uop_frs3_en; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_fp_val_0 = brinfo_uop_fp_val; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_fp_single_0 = brinfo_uop_fp_single; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_xcpt_pf_if_0 = brinfo_uop_xcpt_pf_if; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_xcpt_ae_if_0 = brinfo_uop_xcpt_ae_if; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_xcpt_ma_if_0 = brinfo_uop_xcpt_ma_if; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_bp_debug_if_0 = brinfo_uop_bp_debug_if; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_bp_xcpt_if_0 = brinfo_uop_bp_xcpt_if; // 
@[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_debug_fsrc_0 = brinfo_uop_debug_fsrc; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_uop_debug_tsrc_0 = brinfo_uop_debug_tsrc; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_valid_0 = brinfo_valid; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_mispredict_0 = brinfo_mispredict; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_taken_0 = brinfo_taken; // @[functional-unit.scala:290:7, :385:20] wire [2:0] _brinfo_cfi_type_T_1; // @[functional-unit.scala:391:31] assign io_brinfo_cfi_type_0 = brinfo_cfi_type; // @[functional-unit.scala:290:7, :385:20] assign io_brinfo_pc_sel_0 = brinfo_pc_sel; // @[functional-unit.scala:290:7, :385:20] wire [39:0] jalr_target; // @[functional-unit.scala:419:96] assign io_brinfo_jalr_target_0 = brinfo_jalr_target; // @[functional-unit.scala:290:7, :385:20] wire [20:0] target_offset; // @[functional-unit.scala:402:40] assign io_brinfo_target_offset_0 = brinfo_target_offset; // @[functional-unit.scala:290:7, :385:20] assign brinfo_valid = _brinfo_valid_T; // @[functional-unit.scala:385:20, :388:34] wire [2:0] _brinfo_cfi_type_T = {2'h0, is_br}; // @[functional-unit.scala:369:61, :392:31] assign _brinfo_cfi_type_T_1 = is_jalr ? 3'h3 : _brinfo_cfi_type_T; // @[functional-unit.scala:371:48, :391:31, :392:31] assign brinfo_cfi_type = _brinfo_cfi_type_T_1; // @[functional-unit.scala:385:20, :391:31] wire [20:0] _target_offset_T = imm_xprlen[20:0]; // @[util.scala:282:60] assign target_offset = _target_offset_T; // @[functional-unit.scala:402:{33,40}] assign brinfo_target_offset = target_offset; // @[functional-unit.scala:385:20, :402:40] wire [63:0] _jalr_target_xlen_T_3; // @[functional-unit.scala:418:60] wire [63:0] jalr_target_xlen; // @[functional-unit.scala:417:32] wire [63:0] _jalr_target_a_T = jalr_target_xlen; // @[functional-unit.scala:410:18, :417:32] wire [64:0] _jalr_target_xlen_T = {jalr_target_base[63], jalr_target_base} + {{44{target_offset[20]}}, target_offset}; // @[functional-unit.scala:402:40, :416:49, :418:43] wire [63:0] _jalr_target_xlen_T_1 = _jalr_target_xlen_T[63:0]; // @[functional-unit.scala:418:43] wire [63:0] _jalr_target_xlen_T_2 = _jalr_target_xlen_T_1; // @[functional-unit.scala:418:43] assign _jalr_target_xlen_T_3 = _jalr_target_xlen_T_2; // @[functional-unit.scala:418:{43,60}] assign jalr_target_xlen = _jalr_target_xlen_T_3; // @[functional-unit.scala:417:32, :418:60] wire [24:0] jalr_target_a = _jalr_target_a_T[63:39]; // @[functional-unit.scala:410:{18,25}] wire _jalr_target_msb_T = jalr_target_a == 25'h0; // @[functional-unit.scala:410:25, :411:23] wire _jalr_target_msb_T_1 = &jalr_target_a; // @[functional-unit.scala:410:25, :411:36] wire _jalr_target_msb_T_2 = _jalr_target_msb_T | _jalr_target_msb_T_1; // @[functional-unit.scala:411:{23,31,36}] wire _jalr_target_msb_T_3 = jalr_target_xlen[39]; // @[functional-unit.scala:411:48, :417:32] wire _jalr_target_msb_T_4 = jalr_target_xlen[38]; // @[functional-unit.scala:411:64, :417:32] wire _jalr_target_msb_T_5 = ~_jalr_target_msb_T_4; // @[functional-unit.scala:411:{61,64}] wire jalr_target_msb = _jalr_target_msb_T_2 ? 
_jalr_target_msb_T_3 : _jalr_target_msb_T_5; // @[functional-unit.scala:411:{20,31,48,61}] wire [38:0] _jalr_target_T = jalr_target_xlen[38:0]; // @[functional-unit.scala:412:18, :417:32] wire [39:0] _jalr_target_T_1 = {jalr_target_msb, _jalr_target_T}; // @[functional-unit.scala:411:20, :412:{10,18}] wire [39:0] _jalr_target_T_2 = _jalr_target_T_1; // @[functional-unit.scala:412:10, :419:81] wire [39:0] _jalr_target_T_3 = _jalr_target_T_2 & 40'hFFFFFFFFFE; // @[functional-unit.scala:419:{81,88}] wire [39:0] _jalr_target_T_4 = _jalr_target_T_3; // @[functional-unit.scala:419:88] assign jalr_target = _jalr_target_T_4; // @[functional-unit.scala:419:{88,96}] assign brinfo_jalr_target = jalr_target; // @[functional-unit.scala:385:20, :419:96] wire [3:0] _cfi_idx_T_2 = {_cfi_idx_T, 3'h0}; // @[functional-unit.scala:422:{37,69}] wire [5:0] _cfi_idx_T_3 = {io_req_bits_uop_pc_lob_0[5:4], io_req_bits_uop_pc_lob_0[3:0] ^ _cfi_idx_T_2}; // @[functional-unit.scala:290:7, :422:{32,37}] wire [2:0] cfi_idx = _cfi_idx_T_3[3:1]; // @[functional-unit.scala:422:{32,112}] wire _mispredict_T_1 = ~io_get_ftq_pc_next_val_0; // @[functional-unit.scala:290:7, :425:21] wire _mispredict_T_2 = io_get_ftq_pc_next_pc_0 != jalr_target; // @[functional-unit.scala:290:7, :419:96, :426:44] wire _mispredict_T_3 = _mispredict_T_1 | _mispredict_T_2; // @[functional-unit.scala:425:{21,45}, :426:44] wire _mispredict_T_4 = ~io_get_ftq_pc_entry_cfi_idx_valid_0; // @[functional-unit.scala:290:7, :427:21] wire _mispredict_T_5 = _mispredict_T_3 | _mispredict_T_4; // @[functional-unit.scala:425:45, :426:61, :427:21] wire _mispredict_T_6 = io_get_ftq_pc_entry_cfi_idx_bits_0 != cfi_idx; // @[functional-unit.scala:290:7, :422:112, :428:55] wire _mispredict_T_7 = _mispredict_T_5 | _mispredict_T_6; // @[functional-unit.scala:426:61, :427:56, :428:55] assign mispredict = pc_sel == 2'h2 ? _mispredict_T_7 : _brinfo_valid_T & (pc_sel == 2'h1 ? _mispredict_T : ~(|pc_sel) & io_req_bits_uop_taken_0); // @[functional-unit.scala:290:7, :349:53, :364:28, :367:28, :373:27, :377:{18,32}, :378:18, :380:{18,32}, :381:{18,21}, :388:34, :424:{18,31}, :425:18, :427:56] reg r_val_0; // @[functional-unit.scala:446:23] reg [63:0] r_data_0; // @[functional-unit.scala:447:19] assign io_resp_bits_data_0 = r_data_0; // @[functional-unit.scala:290:7, :447:19] wire _alu_out_T = ~io_req_bits_uop_is_br_0; // @[functional-unit.scala:290:7] wire _alu_out_T_1 = _alu_out_T & io_req_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7] wire [63:0] _alu_out_T_4 = io_req_bits_uop_ldst_is_rs1_0 ? io_req_bits_rs1_data_0 : io_req_bits_rs2_data_0; // @[functional-unit.scala:290:7, :450:8] wire _alu_out_T_5 = io_req_bits_uop_uopc_0 == 7'h6D; // @[functional-unit.scala:290:7, :451:30] wire [63:0] _alu_out_T_6 = _alu_out_T_5 ? 
io_req_bits_rs2_data_0 : _alu_io_out; // @[functional-unit.scala:290:7, :327:19, :451:{8,30}] wire [63:0] alu_out = _alu_out_T_6; // @[functional-unit.scala:449:20, :451:8] wire [63:0] _r_data_0_T_3 = alu_out; // @[functional-unit.scala:449:20, :453:19] assign _io_bypass_0_bits_data_T_3 = alu_out; // @[functional-unit.scala:449:20, :467:32] wire _GEN_1 = io_req_bits_uop_is_br_0 & io_req_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7] wire _r_data_0_T; // @[micro-op.scala:109:32] assign _r_data_0_T = _GEN_1; // @[micro-op.scala:109:32] wire _io_bypass_0_bits_data_T; // @[micro-op.scala:109:32] assign _io_bypass_0_bits_data_T = _GEN_1; // @[micro-op.scala:109:32] wire _GEN_2 = pc_sel == 2'h1; // @[functional-unit.scala:349:53, :453:54] wire _r_data_0_T_2; // @[functional-unit.scala:453:54] assign _r_data_0_T_2 = _GEN_2; // @[functional-unit.scala:453:54] wire _io_bypass_0_bits_data_T_2; // @[functional-unit.scala:467:67] assign _io_bypass_0_bits_data_T_2 = _GEN_2; // @[functional-unit.scala:453:54, :467:67] wire _r_pred_0_T = ~io_req_bits_uop_is_br_0; // @[functional-unit.scala:290:7] wire _r_pred_0_T_1 = _r_pred_0_T & io_req_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_data_0 = _io_bypass_0_bits_data_T_3; // @[functional-unit.scala:290:7, :467:32] always @(posedge clock) begin // @[functional-unit.scala:290:7] if (reset) begin // @[functional-unit.scala:290:7] r_valids_0 <= 1'h0; // @[functional-unit.scala:236:27] r_val_0 <= 1'h0; // @[functional-unit.scala:446:23] end else begin // @[functional-unit.scala:290:7] r_valids_0 <= _r_valids_0_T_5; // @[functional-unit.scala:236:27, :240:84] r_val_0 <= io_req_valid_0; // @[functional-unit.scala:290:7, :446:23] end r_uops_0_uopc <= io_req_bits_uop_uopc_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_inst <= io_req_bits_uop_inst_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_debug_inst <= io_req_bits_uop_debug_inst_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_rvc <= io_req_bits_uop_is_rvc_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_debug_pc <= io_req_bits_uop_debug_pc_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_iq_type <= io_req_bits_uop_iq_type_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_fu_code <= io_req_bits_uop_fu_code_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_br_type <= io_req_bits_uop_ctrl_br_type_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_op1_sel <= io_req_bits_uop_ctrl_op1_sel_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_op2_sel <= io_req_bits_uop_ctrl_op2_sel_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_imm_sel <= io_req_bits_uop_ctrl_imm_sel_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_op_fcn <= io_req_bits_uop_ctrl_op_fcn_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_fcn_dw <= io_req_bits_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_csr_cmd <= io_req_bits_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_is_load <= io_req_bits_uop_ctrl_is_load_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_is_sta <= io_req_bits_uop_ctrl_is_sta_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ctrl_is_std <= io_req_bits_uop_ctrl_is_std_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_iw_state <= io_req_bits_uop_iw_state_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_iw_p1_poisoned <= io_req_bits_uop_iw_p1_poisoned_0; // @[functional-unit.scala:237:23, :290:7] 
r_uops_0_iw_p2_poisoned <= io_req_bits_uop_iw_p2_poisoned_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_br <= io_req_bits_uop_is_br_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_jalr <= io_req_bits_uop_is_jalr_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_jal <= io_req_bits_uop_is_jal_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_sfb <= io_req_bits_uop_is_sfb_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_br_mask <= _r_uops_0_br_mask_T_1; // @[util.scala:85:25] r_uops_0_br_tag <= io_req_bits_uop_br_tag_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ftq_idx <= io_req_bits_uop_ftq_idx_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_edge_inst <= io_req_bits_uop_edge_inst_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_pc_lob <= io_req_bits_uop_pc_lob_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_taken <= io_req_bits_uop_taken_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_imm_packed <= io_req_bits_uop_imm_packed_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_csr_addr <= io_req_bits_uop_csr_addr_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_rob_idx <= io_req_bits_uop_rob_idx_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ldq_idx <= io_req_bits_uop_ldq_idx_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_stq_idx <= io_req_bits_uop_stq_idx_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_rxq_idx <= io_req_bits_uop_rxq_idx_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_pdst <= io_req_bits_uop_pdst_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_prs1 <= io_req_bits_uop_prs1_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_prs2 <= io_req_bits_uop_prs2_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_prs3 <= io_req_bits_uop_prs3_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ppred <= io_req_bits_uop_ppred_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_prs1_busy <= io_req_bits_uop_prs1_busy_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_prs2_busy <= io_req_bits_uop_prs2_busy_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_prs3_busy <= io_req_bits_uop_prs3_busy_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_ppred_busy <= io_req_bits_uop_ppred_busy_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_stale_pdst <= io_req_bits_uop_stale_pdst_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_exception <= io_req_bits_uop_exception_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_exc_cause <= io_req_bits_uop_exc_cause_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_bypassable <= io_req_bits_uop_bypassable_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_mem_cmd <= io_req_bits_uop_mem_cmd_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_mem_size <= io_req_bits_uop_mem_size_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_mem_signed <= io_req_bits_uop_mem_signed_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_fence <= io_req_bits_uop_is_fence_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_fencei <= io_req_bits_uop_is_fencei_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_amo <= io_req_bits_uop_is_amo_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_uses_ldq <= io_req_bits_uop_uses_ldq_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_uses_stq <= io_req_bits_uop_uses_stq_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_sys_pc2epc <= io_req_bits_uop_is_sys_pc2epc_0; // @[functional-unit.scala:237:23, :290:7] r_uops_0_is_unique 
      <= io_req_bits_uop_is_unique_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_flush_on_commit <= io_req_bits_uop_flush_on_commit_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_ldst_is_rs1 <= io_req_bits_uop_ldst_is_rs1_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_ldst <= io_req_bits_uop_ldst_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_lrs1 <= io_req_bits_uop_lrs1_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_lrs2 <= io_req_bits_uop_lrs2_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_lrs3 <= io_req_bits_uop_lrs3_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_ldst_val <= io_req_bits_uop_ldst_val_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_dst_rtype <= io_req_bits_uop_dst_rtype_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_lrs1_rtype <= io_req_bits_uop_lrs1_rtype_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_lrs2_rtype <= io_req_bits_uop_lrs2_rtype_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_frs3_en <= io_req_bits_uop_frs3_en_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_fp_val <= io_req_bits_uop_fp_val_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_fp_single <= io_req_bits_uop_fp_single_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_xcpt_pf_if <= io_req_bits_uop_xcpt_pf_if_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_xcpt_ae_if <= io_req_bits_uop_xcpt_ae_if_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_xcpt_ma_if <= io_req_bits_uop_xcpt_ma_if_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_bp_debug_if <= io_req_bits_uop_bp_debug_if_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_bp_xcpt_if <= io_req_bits_uop_bp_xcpt_if_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_debug_fsrc <= io_req_bits_uop_debug_fsrc_0; // @[functional-unit.scala:237:23, :290:7]
    r_uops_0_debug_tsrc <= io_req_bits_uop_debug_tsrc_0; // @[functional-unit.scala:237:23, :290:7]
    r_data_0 <= _r_data_0_T_3; // @[functional-unit.scala:447:19, :453:19]
  end // always @(posedge)
  ALU alu ( // @[functional-unit.scala:327:19]
    .clock (clock),
    .reset (reset),
    .io_dw (io_req_bits_uop_ctrl_fcn_dw_0), // @[functional-unit.scala:290:7]
    .io_fn (io_req_bits_uop_ctrl_op_fcn_0), // @[functional-unit.scala:290:7]
    .io_in2 (op2_data), // @[functional-unit.scala:321:21]
    .io_in1 (io_req_bits_uop_ctrl_op1_sel_0 == 2'h0 ? io_req_bits_rs1_data_0 : io_req_bits_uop_ctrl_op1_sel_0 == 2'h2 ?
{{24{uop_pc[39]}}, uop_pc} : 64'h0), // @[util.scala:261:{20,25,46}] .io_out (_alu_io_out) ); // @[functional-unit.scala:327:19] assign io_resp_valid = io_resp_valid_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_uopc = io_resp_bits_uop_uopc_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_inst = io_resp_bits_uop_inst_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_debug_inst = io_resp_bits_uop_debug_inst_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_rvc = io_resp_bits_uop_is_rvc_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_debug_pc = io_resp_bits_uop_debug_pc_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_iq_type = io_resp_bits_uop_iq_type_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_fu_code = io_resp_bits_uop_fu_code_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_br_type = io_resp_bits_uop_ctrl_br_type_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_op1_sel = io_resp_bits_uop_ctrl_op1_sel_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_op2_sel = io_resp_bits_uop_ctrl_op2_sel_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_imm_sel = io_resp_bits_uop_ctrl_imm_sel_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_op_fcn = io_resp_bits_uop_ctrl_op_fcn_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_fcn_dw = io_resp_bits_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_csr_cmd = io_resp_bits_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_is_load = io_resp_bits_uop_ctrl_is_load_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_is_sta = io_resp_bits_uop_ctrl_is_sta_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ctrl_is_std = io_resp_bits_uop_ctrl_is_std_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_iw_state = io_resp_bits_uop_iw_state_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_iw_p1_poisoned = io_resp_bits_uop_iw_p1_poisoned_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_iw_p2_poisoned = io_resp_bits_uop_iw_p2_poisoned_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_br = io_resp_bits_uop_is_br_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_jalr = io_resp_bits_uop_is_jalr_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_jal = io_resp_bits_uop_is_jal_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_sfb = io_resp_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_br_mask = io_resp_bits_uop_br_mask_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_br_tag = io_resp_bits_uop_br_tag_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ftq_idx = io_resp_bits_uop_ftq_idx_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_edge_inst = io_resp_bits_uop_edge_inst_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_pc_lob = io_resp_bits_uop_pc_lob_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_taken = io_resp_bits_uop_taken_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_imm_packed = io_resp_bits_uop_imm_packed_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_csr_addr = io_resp_bits_uop_csr_addr_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_rob_idx = io_resp_bits_uop_rob_idx_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ldq_idx = io_resp_bits_uop_ldq_idx_0; // 
@[functional-unit.scala:290:7] assign io_resp_bits_uop_stq_idx = io_resp_bits_uop_stq_idx_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_rxq_idx = io_resp_bits_uop_rxq_idx_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_pdst = io_resp_bits_uop_pdst_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_prs1 = io_resp_bits_uop_prs1_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_prs2 = io_resp_bits_uop_prs2_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_prs3 = io_resp_bits_uop_prs3_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ppred = io_resp_bits_uop_ppred_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_prs1_busy = io_resp_bits_uop_prs1_busy_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_prs2_busy = io_resp_bits_uop_prs2_busy_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_prs3_busy = io_resp_bits_uop_prs3_busy_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ppred_busy = io_resp_bits_uop_ppred_busy_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_stale_pdst = io_resp_bits_uop_stale_pdst_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_exception = io_resp_bits_uop_exception_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_exc_cause = io_resp_bits_uop_exc_cause_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_bypassable = io_resp_bits_uop_bypassable_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_mem_cmd = io_resp_bits_uop_mem_cmd_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_mem_size = io_resp_bits_uop_mem_size_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_mem_signed = io_resp_bits_uop_mem_signed_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_fence = io_resp_bits_uop_is_fence_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_fencei = io_resp_bits_uop_is_fencei_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_amo = io_resp_bits_uop_is_amo_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_uses_ldq = io_resp_bits_uop_uses_ldq_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_uses_stq = io_resp_bits_uop_uses_stq_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_sys_pc2epc = io_resp_bits_uop_is_sys_pc2epc_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_is_unique = io_resp_bits_uop_is_unique_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_flush_on_commit = io_resp_bits_uop_flush_on_commit_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ldst_is_rs1 = io_resp_bits_uop_ldst_is_rs1_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ldst = io_resp_bits_uop_ldst_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_lrs1 = io_resp_bits_uop_lrs1_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_lrs2 = io_resp_bits_uop_lrs2_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_lrs3 = io_resp_bits_uop_lrs3_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_ldst_val = io_resp_bits_uop_ldst_val_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_dst_rtype = io_resp_bits_uop_dst_rtype_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_lrs1_rtype = io_resp_bits_uop_lrs1_rtype_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_lrs2_rtype = io_resp_bits_uop_lrs2_rtype_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_frs3_en = io_resp_bits_uop_frs3_en_0; // @[functional-unit.scala:290:7] assign 
io_resp_bits_uop_fp_val = io_resp_bits_uop_fp_val_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_fp_single = io_resp_bits_uop_fp_single_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_xcpt_pf_if = io_resp_bits_uop_xcpt_pf_if_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_xcpt_ae_if = io_resp_bits_uop_xcpt_ae_if_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_xcpt_ma_if = io_resp_bits_uop_xcpt_ma_if_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_bp_debug_if = io_resp_bits_uop_bp_debug_if_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_bp_xcpt_if = io_resp_bits_uop_bp_xcpt_if_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_debug_fsrc = io_resp_bits_uop_debug_fsrc_0; // @[functional-unit.scala:290:7] assign io_resp_bits_uop_debug_tsrc = io_resp_bits_uop_debug_tsrc_0; // @[functional-unit.scala:290:7] assign io_resp_bits_data = io_resp_bits_data_0; // @[functional-unit.scala:290:7] assign io_bypass_0_valid = io_bypass_0_valid_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_uopc = io_bypass_0_bits_uop_uopc_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_inst = io_bypass_0_bits_uop_inst_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_debug_inst = io_bypass_0_bits_uop_debug_inst_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_rvc = io_bypass_0_bits_uop_is_rvc_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_debug_pc = io_bypass_0_bits_uop_debug_pc_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_iq_type = io_bypass_0_bits_uop_iq_type_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_fu_code = io_bypass_0_bits_uop_fu_code_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_br_type = io_bypass_0_bits_uop_ctrl_br_type_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_op1_sel = io_bypass_0_bits_uop_ctrl_op1_sel_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_op2_sel = io_bypass_0_bits_uop_ctrl_op2_sel_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_imm_sel = io_bypass_0_bits_uop_ctrl_imm_sel_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_op_fcn = io_bypass_0_bits_uop_ctrl_op_fcn_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_fcn_dw = io_bypass_0_bits_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_csr_cmd = io_bypass_0_bits_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_is_load = io_bypass_0_bits_uop_ctrl_is_load_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_is_sta = io_bypass_0_bits_uop_ctrl_is_sta_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ctrl_is_std = io_bypass_0_bits_uop_ctrl_is_std_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_iw_state = io_bypass_0_bits_uop_iw_state_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_iw_p1_poisoned = io_bypass_0_bits_uop_iw_p1_poisoned_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_iw_p2_poisoned = io_bypass_0_bits_uop_iw_p2_poisoned_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_br = io_bypass_0_bits_uop_is_br_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_jalr = io_bypass_0_bits_uop_is_jalr_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_jal = io_bypass_0_bits_uop_is_jal_0; // 
@[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_sfb = io_bypass_0_bits_uop_is_sfb_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_br_mask = io_bypass_0_bits_uop_br_mask_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_br_tag = io_bypass_0_bits_uop_br_tag_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ftq_idx = io_bypass_0_bits_uop_ftq_idx_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_edge_inst = io_bypass_0_bits_uop_edge_inst_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_pc_lob = io_bypass_0_bits_uop_pc_lob_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_taken = io_bypass_0_bits_uop_taken_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_imm_packed = io_bypass_0_bits_uop_imm_packed_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_csr_addr = io_bypass_0_bits_uop_csr_addr_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_rob_idx = io_bypass_0_bits_uop_rob_idx_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ldq_idx = io_bypass_0_bits_uop_ldq_idx_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_stq_idx = io_bypass_0_bits_uop_stq_idx_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_rxq_idx = io_bypass_0_bits_uop_rxq_idx_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_pdst = io_bypass_0_bits_uop_pdst_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_prs1 = io_bypass_0_bits_uop_prs1_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_prs2 = io_bypass_0_bits_uop_prs2_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_prs3 = io_bypass_0_bits_uop_prs3_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ppred = io_bypass_0_bits_uop_ppred_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_prs1_busy = io_bypass_0_bits_uop_prs1_busy_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_prs2_busy = io_bypass_0_bits_uop_prs2_busy_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_prs3_busy = io_bypass_0_bits_uop_prs3_busy_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ppred_busy = io_bypass_0_bits_uop_ppred_busy_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_stale_pdst = io_bypass_0_bits_uop_stale_pdst_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_exception = io_bypass_0_bits_uop_exception_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_exc_cause = io_bypass_0_bits_uop_exc_cause_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_bypassable = io_bypass_0_bits_uop_bypassable_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_mem_cmd = io_bypass_0_bits_uop_mem_cmd_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_mem_size = io_bypass_0_bits_uop_mem_size_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_mem_signed = io_bypass_0_bits_uop_mem_signed_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_fence = io_bypass_0_bits_uop_is_fence_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_fencei = io_bypass_0_bits_uop_is_fencei_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_amo = io_bypass_0_bits_uop_is_amo_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_uses_ldq = io_bypass_0_bits_uop_uses_ldq_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_uses_stq = 
io_bypass_0_bits_uop_uses_stq_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_sys_pc2epc = io_bypass_0_bits_uop_is_sys_pc2epc_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_is_unique = io_bypass_0_bits_uop_is_unique_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_flush_on_commit = io_bypass_0_bits_uop_flush_on_commit_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ldst_is_rs1 = io_bypass_0_bits_uop_ldst_is_rs1_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ldst = io_bypass_0_bits_uop_ldst_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_lrs1 = io_bypass_0_bits_uop_lrs1_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_lrs2 = io_bypass_0_bits_uop_lrs2_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_lrs3 = io_bypass_0_bits_uop_lrs3_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_ldst_val = io_bypass_0_bits_uop_ldst_val_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_dst_rtype = io_bypass_0_bits_uop_dst_rtype_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_lrs1_rtype = io_bypass_0_bits_uop_lrs1_rtype_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_lrs2_rtype = io_bypass_0_bits_uop_lrs2_rtype_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_frs3_en = io_bypass_0_bits_uop_frs3_en_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_fp_val = io_bypass_0_bits_uop_fp_val_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_fp_single = io_bypass_0_bits_uop_fp_single_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_xcpt_pf_if = io_bypass_0_bits_uop_xcpt_pf_if_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_xcpt_ae_if = io_bypass_0_bits_uop_xcpt_ae_if_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_xcpt_ma_if = io_bypass_0_bits_uop_xcpt_ma_if_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_bp_debug_if = io_bypass_0_bits_uop_bp_debug_if_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_bp_xcpt_if = io_bypass_0_bits_uop_bp_xcpt_if_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_debug_fsrc = io_bypass_0_bits_uop_debug_fsrc_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_uop_debug_tsrc = io_bypass_0_bits_uop_debug_tsrc_0; // @[functional-unit.scala:290:7] assign io_bypass_0_bits_data = io_bypass_0_bits_data_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_uopc = io_brinfo_uop_uopc_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_inst = io_brinfo_uop_inst_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_debug_inst = io_brinfo_uop_debug_inst_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_rvc = io_brinfo_uop_is_rvc_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_debug_pc = io_brinfo_uop_debug_pc_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_iq_type = io_brinfo_uop_iq_type_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_fu_code = io_brinfo_uop_fu_code_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_br_type = io_brinfo_uop_ctrl_br_type_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_op1_sel = io_brinfo_uop_ctrl_op1_sel_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_op2_sel = io_brinfo_uop_ctrl_op2_sel_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_imm_sel = io_brinfo_uop_ctrl_imm_sel_0; // @[functional-unit.scala:290:7] 
assign io_brinfo_uop_ctrl_op_fcn = io_brinfo_uop_ctrl_op_fcn_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_fcn_dw = io_brinfo_uop_ctrl_fcn_dw_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_csr_cmd = io_brinfo_uop_ctrl_csr_cmd_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_is_load = io_brinfo_uop_ctrl_is_load_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_is_sta = io_brinfo_uop_ctrl_is_sta_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ctrl_is_std = io_brinfo_uop_ctrl_is_std_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_iw_state = io_brinfo_uop_iw_state_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_iw_p1_poisoned = io_brinfo_uop_iw_p1_poisoned_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_iw_p2_poisoned = io_brinfo_uop_iw_p2_poisoned_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_br = io_brinfo_uop_is_br_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_jalr = io_brinfo_uop_is_jalr_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_jal = io_brinfo_uop_is_jal_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_sfb = io_brinfo_uop_is_sfb_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_br_mask = io_brinfo_uop_br_mask_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_br_tag = io_brinfo_uop_br_tag_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ftq_idx = io_brinfo_uop_ftq_idx_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_edge_inst = io_brinfo_uop_edge_inst_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_pc_lob = io_brinfo_uop_pc_lob_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_taken = io_brinfo_uop_taken_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_imm_packed = io_brinfo_uop_imm_packed_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_csr_addr = io_brinfo_uop_csr_addr_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_rob_idx = io_brinfo_uop_rob_idx_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ldq_idx = io_brinfo_uop_ldq_idx_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_stq_idx = io_brinfo_uop_stq_idx_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_rxq_idx = io_brinfo_uop_rxq_idx_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_pdst = io_brinfo_uop_pdst_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_prs1 = io_brinfo_uop_prs1_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_prs2 = io_brinfo_uop_prs2_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_prs3 = io_brinfo_uop_prs3_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ppred = io_brinfo_uop_ppred_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_prs1_busy = io_brinfo_uop_prs1_busy_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_prs2_busy = io_brinfo_uop_prs2_busy_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_prs3_busy = io_brinfo_uop_prs3_busy_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ppred_busy = io_brinfo_uop_ppred_busy_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_stale_pdst = io_brinfo_uop_stale_pdst_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_exception = io_brinfo_uop_exception_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_exc_cause = io_brinfo_uop_exc_cause_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_bypassable = io_brinfo_uop_bypassable_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_mem_cmd = 
io_brinfo_uop_mem_cmd_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_mem_size = io_brinfo_uop_mem_size_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_mem_signed = io_brinfo_uop_mem_signed_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_fence = io_brinfo_uop_is_fence_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_fencei = io_brinfo_uop_is_fencei_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_amo = io_brinfo_uop_is_amo_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_uses_ldq = io_brinfo_uop_uses_ldq_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_uses_stq = io_brinfo_uop_uses_stq_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_sys_pc2epc = io_brinfo_uop_is_sys_pc2epc_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_is_unique = io_brinfo_uop_is_unique_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_flush_on_commit = io_brinfo_uop_flush_on_commit_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ldst_is_rs1 = io_brinfo_uop_ldst_is_rs1_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ldst = io_brinfo_uop_ldst_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_lrs1 = io_brinfo_uop_lrs1_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_lrs2 = io_brinfo_uop_lrs2_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_lrs3 = io_brinfo_uop_lrs3_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_ldst_val = io_brinfo_uop_ldst_val_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_dst_rtype = io_brinfo_uop_dst_rtype_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_lrs1_rtype = io_brinfo_uop_lrs1_rtype_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_lrs2_rtype = io_brinfo_uop_lrs2_rtype_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_frs3_en = io_brinfo_uop_frs3_en_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_fp_val = io_brinfo_uop_fp_val_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_fp_single = io_brinfo_uop_fp_single_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_xcpt_pf_if = io_brinfo_uop_xcpt_pf_if_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_xcpt_ae_if = io_brinfo_uop_xcpt_ae_if_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_xcpt_ma_if = io_brinfo_uop_xcpt_ma_if_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_bp_debug_if = io_brinfo_uop_bp_debug_if_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_bp_xcpt_if = io_brinfo_uop_bp_xcpt_if_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_debug_fsrc = io_brinfo_uop_debug_fsrc_0; // @[functional-unit.scala:290:7] assign io_brinfo_uop_debug_tsrc = io_brinfo_uop_debug_tsrc_0; // @[functional-unit.scala:290:7] assign io_brinfo_valid = io_brinfo_valid_0; // @[functional-unit.scala:290:7] assign io_brinfo_mispredict = io_brinfo_mispredict_0; // @[functional-unit.scala:290:7] assign io_brinfo_taken = io_brinfo_taken_0; // @[functional-unit.scala:290:7] assign io_brinfo_cfi_type = io_brinfo_cfi_type_0; // @[functional-unit.scala:290:7] assign io_brinfo_pc_sel = io_brinfo_pc_sel_0; // @[functional-unit.scala:290:7] assign io_brinfo_jalr_target = io_brinfo_jalr_target_0; // @[functional-unit.scala:290:7] assign io_brinfo_target_offset = io_brinfo_target_offset_0; // @[functional-unit.scala:290:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.tilelink

import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg

case class TLMonitorArgs(edge: TLEdge)

abstract class TLMonitorBase(args: TLMonitorArgs) extends Module {
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })

  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}

object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
    if (enable) {
      EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
    } else { node }
  }
}

class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) {
  require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))

  val cover_prop_class = PropertyClass.Default

  //Like assert but can flip to being an assumption for formal verification
  def monAssert(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir, cond, message, PropertyClass.Default)
    }

  def assume(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir.flip, cond, message, PropertyClass.Default)
    }

  def extra = {
    args.edge.sourceInfo match {
      case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
      case _ => ""
    }
  }

  def visible(address: UInt, source: UInt, edge: TLEdge) =
    edge.client.clients.map { c =>
      !c.sourceId.contains(source) ||
      c.visibility.map(_.contains(address)).reduce(_ || _)
    }.reduce(_ && _)

  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"

    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)

    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")

    //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
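    // Each opcode case below follows the same checking pattern: first confirm that the
    // diplomatic parameters allow the message (the master emits it and the slave supports
    // it at this source/size), then check source ID, alignment, param, mask and corrupt.
    // A minimal sketch of that pattern, reusing only helpers already defined in this file
    // (illustrative only, not part of the original source):
    //
    //   when (bundle.opcode === TLMessages.Get) {
    //     monAssert (edge.master.emitsGet(bundle.source, bundle.size), "illegal Get" + extra)
    //     monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
    //     monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
    //   }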
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok,
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal :=
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
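// inflight_opcodes and inflight_sizes are updated in lockstep with the inflight bitmap above: each source ID owns one (opcode+1) and one (size+1) field, so a later 'D' beat can be checked against the opcode and size of its original 'A' request.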
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
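// Barrel rotation: stage i conditionally rotates by 2^i when bit i of the zero-padded amount is set.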
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // A potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max,
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
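The firstlastHelper logic in TLEdge above is what produces the per-channel beat counters
visible in the generated monitor below. As a minimal self-contained sketch (an illustration
added here, not one of the input files; the name BeatTracker and its parameters are invented
for the example), the same first/last/done bookkeeping can be written as:

import chisel3._
import chisel3.util._

// Tracks first/last/done across a multi-beat burst, in the style of TLEdge.firstlastHelper.
class BeatTracker(maxBeats: Int) extends Module {
  require(maxBeats >= 1)
  val io = IO(new Bundle {
    val fire   = Input(Bool())                    // channel handshake fired this cycle
    val beats1 = Input(UInt(log2Up(maxBeats).W))  // beats per message, minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Up(maxBeats).W)) // counts remaining beats after the first
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) { counter := Mux(io.first, io.beats1, counter1) }
}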
module TLMonitor_46( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_b_ready, // @[Monitor.scala:20:14] input io_in_b_valid, // @[Monitor.scala:20:14] input [1:0] io_in_b_bits_param, // @[Monitor.scala:20:14] input [5:0] io_in_b_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_b_bits_address, // @[Monitor.scala:20:14] input io_in_c_ready, // @[Monitor.scala:20:14] input io_in_c_valid, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_c_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14] input io_in_c_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [5:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt, // @[Monitor.scala:20:14] input io_in_e_valid, // @[Monitor.scala:20:14] input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire [12:0] _GEN_0 = {10'h0, io_in_c_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [5:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _d_first_T_3 = io_in_d_ready & io_in_d_valid; // @[Decoupled.scala:51:35] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [5:0] source_1; // @[Monitor.scala:541:22] reg [2:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [2:0] b_first_counter; // @[Edges.scala:229:27] reg [1:0] param_2; // @[Monitor.scala:411:22] reg [5:0] source_2; // @[Monitor.scala:413:22] reg [31:0] address_1; // @[Monitor.scala:414:22] wire _c_first_T_1 = io_in_c_ready & io_in_c_valid; // @[Decoupled.scala:51:35] reg [2:0] c_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [2:0] size_3; // @[Monitor.scala:517:22] reg [5:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [62:0] inflight; // 
@[Monitor.scala:614:27] reg [251:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [251:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire [63:0] _GEN_1 = {58'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_2 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_3 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [63:0] _GEN_4 = {58'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [62:0] inflight_1; // @[Monitor.scala:726:35] reg [251:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] c_first_counter_1; // @[Edges.scala:229:27] wire c_first_1 = c_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_5 = io_in_c_bits_opcode[2] & io_in_c_bits_opcode[1]; // @[Edges.scala:68:{36,40,51}] wire [63:0] _GEN_6 = {58'h0, io_in_c_bits_source}; // @[OneHot.scala:58:35] wire _GEN_7 = _c_first_T_1 & c_first_1 & _GEN_5; // @[Decoupled.scala:51:35] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] reg [6:0] inflight_2; // @[Monitor.scala:828:27] reg [2:0] d_first_counter_3; // @[Edges.scala:229:27] wire d_first_3 = d_first_counter_3 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_8 = _d_first_T_3 & d_first_3 & io_in_d_bits_opcode[2] & ~(io_in_d_bits_opcode[1]); // @[Decoupled.scala:51:35] wire [7:0] _d_set_T = 8'h1 << io_in_d_bits_sink; // @[OneHot.scala:58:35] wire [6:0] d_set = _GEN_8 ? _d_set_T[6:0] : 7'h0; // @[OneHot.scala:58:35]
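The monitor above keeps "inflight" scoreboards indexed by source id, set when a request
fires and cleared when the matching response completes. As a conceptual Chisel sketch (an
assumption added for illustration, not generated output; InflightScoreboard is an invented
name and it omits the opcode/size side tables the real monitor also tracks):

import chisel3._
import chisel3.util._

// One bit per source id: set on a request fire, cleared on the corresponding response fire.
class InflightScoreboard(sources: Int) extends Module {
  require(sources >= 2)
  val io = IO(new Bundle {
    val reqFire  = Input(Bool())
    val reqSrc   = Input(UInt(log2Ceil(sources).W))
    val respFire = Input(Bool())
    val respSrc  = Input(UInt(log2Ceil(sources).W))
    val busy     = Output(UInt(sources.W))
  })
  val inflight  = RegInit(0.U(sources.W))
  val setMask   = Mux(io.reqFire,  UIntToOH(io.reqSrc,  sources), 0.U)
  val clearMask = Mux(io.respFire, UIntToOH(io.respSrc, sources), 0.U)
  inflight := (inflight | setMask) & ~clearMask
  io.busy  := inflight
}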
Generate the Verilog code corresponding to the following Chisel files. File primitives.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object lowMask { def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt = { require(topBound != bottomBound) val numInVals = BigInt(1)<<in.getWidth if (topBound < bottomBound) { lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound) } else if (numInVals > 64 /* Empirical */) { // For simulation performance, we should avoid generating // exteremely wide shifters, so we divide and conquer. // Empirically, this does not impact synthesis QoR. 
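      // (Illustrative note added here, not in the original comment: with in.getWidth = 7,
      //  numInVals is 128, so the code below splits on the MSB and each recursive lowMask
      //  call sees only a 64-entry range, which then falls through to the narrow-shifter
      //  base case at the end of this method.)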
val mid = numInVals / 2 val msb = in(in.getWidth - 1) val lsbs = in(in.getWidth - 2, 0) if (mid < topBound) { if (mid <= bottomBound) { Mux(msb, lowMask(lsbs, topBound - mid, bottomBound - mid), 0.U ) } else { Mux(msb, lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U, lowMask(lsbs, mid, bottomBound) ) } } else { ~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound)) } } else { val shift = (BigInt(-1)<<numInVals.toInt).S>>in Reverse( shift( (numInVals - 1 - bottomBound).toInt, (numInVals - topBound).toInt ) ) } } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object countLeadingZeros { def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy2 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 1)>>1 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 2).orR reducedVec.asUInt } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy4 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 3)>>2 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 4).orR reducedVec.asUInt } } File MulAddRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulAddRecFN_interIo(expWidth: Int, sigWidth: Int) extends Bundle { //*** ENCODE SOME OF THESE CASES IN FEWER BITS?: val isSigNaNAny = Bool() val isNaNAOrB = Bool() val isInfA = Bool() val isZeroA = Bool() val isInfB = Bool() val isZeroB = Bool() val signProd = Bool() val isNaNC = Bool() val isInfC = Bool() val isZeroC = Bool() val sExpSum = SInt((expWidth + 2).W) val doSubMags = Bool() val CIsDominant = Bool() val CDom_CAlignDist = UInt(log2Ceil(sigWidth + 1).W) val highAlignedSigC = UInt((sigWidth + 2).W) val bit0AlignedSigC = UInt(1.W) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulAddRecFNToRaw_preMul(expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"MulAddRecFNToRaw_preMul_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val op = Input(Bits(2.W)) val a = Input(Bits((expWidth + sigWidth + 1).W)) val b = Input(Bits((expWidth + sigWidth + 1).W)) val c = Input(Bits((expWidth + sigWidth + 1).W)) val mulAddA = Output(UInt(sigWidth.W)) val mulAddB = Output(UInt(sigWidth.W)) val mulAddC = Output(UInt((sigWidth * 2).W)) val toPostMul = Output(new MulAddRecFN_interIo(expWidth, sigWidth)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ //*** POSSIBLE TO REDUCE THIS BY 1 OR 2 BITS? (CURRENTLY 2 BITS BETWEEN //*** UNSHIFTED C AND PRODUCT): val sigSumWidth = sigWidth * 3 + 3 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val rawA = rawFloatFromRecFN(expWidth, sigWidth, io.a) val rawB = rawFloatFromRecFN(expWidth, sigWidth, io.b) val rawC = rawFloatFromRecFN(expWidth, sigWidth, io.c) val signProd = rawA.sign ^ rawB.sign ^ io.op(1) //*** REVIEW THE BIAS FOR 'sExpAlignedProd': val sExpAlignedProd = rawA.sExp +& rawB.sExp + (-(BigInt(1)<<expWidth) + sigWidth + 3).S val doSubMags = signProd ^ rawC.sign ^ io.op(0) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sNatCAlignDist = sExpAlignedProd - rawC.sExp val posNatCAlignDist = sNatCAlignDist(expWidth + 1, 0) val isMinCAlign = rawA.isZero || rawB.isZero || (sNatCAlignDist < 0.S) val CIsDominant = ! 
rawC.isZero && (isMinCAlign || (posNatCAlignDist <= sigWidth.U)) val CAlignDist = Mux(isMinCAlign, 0.U, Mux(posNatCAlignDist < (sigSumWidth - 1).U, posNatCAlignDist(log2Ceil(sigSumWidth) - 1, 0), (sigSumWidth - 1).U ) ) val mainAlignedSigC = (Mux(doSubMags, ~rawC.sig, rawC.sig) ## Fill(sigSumWidth - sigWidth + 2, doSubMags)).asSInt>>CAlignDist val reduced4CExtra = (orReduceBy4(rawC.sig<<((sigSumWidth - sigWidth - 1) & 3)) & lowMask( CAlignDist>>2, //*** NOT NEEDED?: // (sigSumWidth + 2)>>2, (sigSumWidth - 1)>>2, (sigSumWidth - sigWidth - 1)>>2 ) ).orR val alignedSigC = Cat(mainAlignedSigC>>3, Mux(doSubMags, mainAlignedSigC(2, 0).andR && ! reduced4CExtra, mainAlignedSigC(2, 0).orR || reduced4CExtra ) ) //------------------------------------------------------------------------ //------------------------------------------------------------------------ io.mulAddA := rawA.sig io.mulAddB := rawB.sig io.mulAddC := alignedSigC(sigWidth * 2, 1) io.toPostMul.isSigNaNAny := isSigNaNRawFloat(rawA) || isSigNaNRawFloat(rawB) || isSigNaNRawFloat(rawC) io.toPostMul.isNaNAOrB := rawA.isNaN || rawB.isNaN io.toPostMul.isInfA := rawA.isInf io.toPostMul.isZeroA := rawA.isZero io.toPostMul.isInfB := rawB.isInf io.toPostMul.isZeroB := rawB.isZero io.toPostMul.signProd := signProd io.toPostMul.isNaNC := rawC.isNaN io.toPostMul.isInfC := rawC.isInf io.toPostMul.isZeroC := rawC.isZero io.toPostMul.sExpSum := Mux(CIsDominant, rawC.sExp, sExpAlignedProd - sigWidth.S) io.toPostMul.doSubMags := doSubMags io.toPostMul.CIsDominant := CIsDominant io.toPostMul.CDom_CAlignDist := CAlignDist(log2Ceil(sigWidth + 1) - 1, 0) io.toPostMul.highAlignedSigC := alignedSigC(sigSumWidth - 1, sigWidth * 2 + 1) io.toPostMul.bit0AlignedSigC := alignedSigC(0) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulAddRecFNToRaw_postMul(expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"MulAddRecFNToRaw_postMul_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val fromPreMul = Input(new MulAddRecFN_interIo(expWidth, sigWidth)) val mulAddResult = Input(UInt((sigWidth * 2 + 1).W)) val roundingMode = Input(UInt(3.W)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth + 2)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigSumWidth = sigWidth * 3 + 3 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_min = (io.roundingMode === round_min) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val opSignC = io.fromPreMul.signProd ^ io.fromPreMul.doSubMags val sigSum = Cat(Mux(io.mulAddResult(sigWidth * 2), io.fromPreMul.highAlignedSigC + 1.U, io.fromPreMul.highAlignedSigC ), io.mulAddResult(sigWidth * 2 - 1, 0), io.fromPreMul.bit0AlignedSigC ) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val CDom_sign = opSignC val CDom_sExp = io.fromPreMul.sExpSum - io.fromPreMul.doSubMags.zext val CDom_absSigSum = Mux(io.fromPreMul.doSubMags, ~sigSum(sigSumWidth - 1, sigWidth + 1), 0.U(1.W) ## //*** IF GAP IS REDUCED TO 1 BIT, MUST REDUCE THIS COMPONENT TO 1 BIT TOO: 
io.fromPreMul.highAlignedSigC(sigWidth + 1, sigWidth) ## sigSum(sigSumWidth - 3, sigWidth + 2) ) val CDom_absSigSumExtra = Mux(io.fromPreMul.doSubMags, (~sigSum(sigWidth, 1)).orR, sigSum(sigWidth + 1, 1).orR ) val CDom_mainSig = (CDom_absSigSum<<io.fromPreMul.CDom_CAlignDist)( sigWidth * 2 + 1, sigWidth - 3) val CDom_reduced4SigExtra = (orReduceBy4(CDom_absSigSum(sigWidth - 1, 0)<<(~sigWidth & 3)) & lowMask(io.fromPreMul.CDom_CAlignDist>>2, 0, sigWidth>>2)).orR val CDom_sig = Cat(CDom_mainSig>>3, CDom_mainSig(2, 0).orR || CDom_reduced4SigExtra || CDom_absSigSumExtra ) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val notCDom_signSigSum = sigSum(sigWidth * 2 + 3) val notCDom_absSigSum = Mux(notCDom_signSigSum, ~sigSum(sigWidth * 2 + 2, 0), sigSum(sigWidth * 2 + 2, 0) + io.fromPreMul.doSubMags ) val notCDom_reduced2AbsSigSum = orReduceBy2(notCDom_absSigSum) val notCDom_normDistReduced2 = countLeadingZeros(notCDom_reduced2AbsSigSum) val notCDom_nearNormDist = notCDom_normDistReduced2<<1 val notCDom_sExp = io.fromPreMul.sExpSum - notCDom_nearNormDist.asUInt.zext val notCDom_mainSig = (notCDom_absSigSum<<notCDom_nearNormDist)( sigWidth * 2 + 3, sigWidth - 1) val notCDom_reduced4SigExtra = (orReduceBy2( notCDom_reduced2AbsSigSum(sigWidth>>1, 0)<<((sigWidth>>1) & 1)) & lowMask(notCDom_normDistReduced2>>1, 0, (sigWidth + 2)>>2) ).orR val notCDom_sig = Cat(notCDom_mainSig>>3, notCDom_mainSig(2, 0).orR || notCDom_reduced4SigExtra ) val notCDom_completeCancellation = (notCDom_sig(sigWidth + 2, sigWidth + 1) === 0.U) val notCDom_sign = Mux(notCDom_completeCancellation, roundingMode_min, io.fromPreMul.signProd ^ notCDom_signSigSum ) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val notNaN_isInfProd = io.fromPreMul.isInfA || io.fromPreMul.isInfB val notNaN_isInfOut = notNaN_isInfProd || io.fromPreMul.isInfC val notNaN_addZeros = (io.fromPreMul.isZeroA || io.fromPreMul.isZeroB) && io.fromPreMul.isZeroC io.invalidExc := io.fromPreMul.isSigNaNAny || (io.fromPreMul.isInfA && io.fromPreMul.isZeroB) || (io.fromPreMul.isZeroA && io.fromPreMul.isInfB) || (! io.fromPreMul.isNaNAOrB && (io.fromPreMul.isInfA || io.fromPreMul.isInfB) && io.fromPreMul.isInfC && io.fromPreMul.doSubMags) io.rawOut.isNaN := io.fromPreMul.isNaNAOrB || io.fromPreMul.isNaNC io.rawOut.isInf := notNaN_isInfOut //*** IMPROVE?: io.rawOut.isZero := notNaN_addZeros || (! io.fromPreMul.CIsDominant && notCDom_completeCancellation) io.rawOut.sign := (notNaN_isInfProd && io.fromPreMul.signProd) || (io.fromPreMul.isInfC && opSignC) || (notNaN_addZeros && ! roundingMode_min && io.fromPreMul.signProd && opSignC) || (notNaN_addZeros && roundingMode_min && (io.fromPreMul.signProd || opSignC)) || (! notNaN_isInfOut && ! 
notNaN_addZeros && Mux(io.fromPreMul.CIsDominant, CDom_sign, notCDom_sign)) io.rawOut.sExp := Mux(io.fromPreMul.CIsDominant, CDom_sExp, notCDom_sExp) io.rawOut.sig := Mux(io.fromPreMul.CIsDominant, CDom_sig, notCDom_sig) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulAddRecFN(expWidth: Int, sigWidth: Int) extends RawModule { override def desiredName = s"MulAddRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val op = Input(Bits(2.W)) val a = Input(Bits((expWidth + sigWidth + 1).W)) val b = Input(Bits((expWidth + sigWidth + 1).W)) val c = Input(Bits((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val mulAddRecFNToRaw_preMul = Module(new MulAddRecFNToRaw_preMul(expWidth, sigWidth)) val mulAddRecFNToRaw_postMul = Module(new MulAddRecFNToRaw_postMul(expWidth, sigWidth)) mulAddRecFNToRaw_preMul.io.op := io.op mulAddRecFNToRaw_preMul.io.a := io.a mulAddRecFNToRaw_preMul.io.b := io.b mulAddRecFNToRaw_preMul.io.c := io.c val mulAddResult = (mulAddRecFNToRaw_preMul.io.mulAddA * mulAddRecFNToRaw_preMul.io.mulAddB) +& mulAddRecFNToRaw_preMul.io.mulAddC mulAddRecFNToRaw_postMul.io.fromPreMul := mulAddRecFNToRaw_preMul.io.toPostMul mulAddRecFNToRaw_postMul.io.mulAddResult := mulAddResult mulAddRecFNToRaw_postMul.io.roundingMode := io.roundingMode //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundRawFNToRecFN = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0)) roundRawFNToRecFN.io.invalidExc := mulAddRecFNToRaw_postMul.io.invalidExc roundRawFNToRecFN.io.infiniteExc := false.B roundRawFNToRecFN.io.in := mulAddRecFNToRaw_postMul.io.rawOut roundRawFNToRecFN.io.roundingMode := io.roundingMode roundRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundRawFNToRecFN.io.out io.exceptionFlags := roundRawFNToRecFN.io.exceptionFlags } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
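For reference, a minimal usage sketch of the pipeline defined above (an illustration added
here, not one of the input files; the wrapper name FmaF32Example and its port layout are
assumptions) instantiates MulAddRecFN in the single-precision configuration, expWidth = 8
and sigWidth = 24, which is what the module below elaborates:

import chisel3._
import hardfloat._

class FmaF32Example extends Module {
  val io = IO(new Bundle {
    val a, b, c = Input(UInt(33.W))   // recoded (recFN) single-precision operands
    val out     = Output(UInt(33.W))
    val flags   = Output(UInt(5.W))
  })
  val fma = Module(new MulAddRecFN(8, 24))
  fma.io.op             := 0.U                        // plain fused (a * b) + c
  fma.io.a              := io.a
  fma.io.b              := io.b
  fma.io.c              := io.c
  fma.io.roundingMode   := consts.round_near_even
  fma.io.detectTininess := consts.tininess_afterRounding
  io.out   := fma.io.out
  io.flags := fma.io.exceptionFlags
}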
module MulAddRecFNToRaw_preMul_e8_s24_59( // @[MulAddRecFN.scala:71:7] input [32:0] io_a, // @[MulAddRecFN.scala:74:16] input [32:0] io_b, // @[MulAddRecFN.scala:74:16] input [32:0] io_c, // @[MulAddRecFN.scala:74:16] output [23:0] io_mulAddA, // @[MulAddRecFN.scala:74:16] output [23:0] io_mulAddB, // @[MulAddRecFN.scala:74:16] output [47:0] io_mulAddC, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isSigNaNAny, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isNaNAOrB, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isInfA, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isZeroA, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isInfB, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isZeroB, // @[MulAddRecFN.scala:74:16] output io_toPostMul_signProd, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isNaNC, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isInfC, // @[MulAddRecFN.scala:74:16] output io_toPostMul_isZeroC, // @[MulAddRecFN.scala:74:16] output [9:0] io_toPostMul_sExpSum, // @[MulAddRecFN.scala:74:16] output io_toPostMul_doSubMags, // @[MulAddRecFN.scala:74:16] output io_toPostMul_CIsDominant, // @[MulAddRecFN.scala:74:16] output [4:0] io_toPostMul_CDom_CAlignDist, // @[MulAddRecFN.scala:74:16] output [25:0] io_toPostMul_highAlignedSigC, // @[MulAddRecFN.scala:74:16] output io_toPostMul_bit0AlignedSigC // @[MulAddRecFN.scala:74:16] ); wire [32:0] io_a_0 = io_a; // @[MulAddRecFN.scala:71:7] wire [32:0] io_b_0 = io_b; // @[MulAddRecFN.scala:71:7] wire [32:0] io_c_0 = io_c; // @[MulAddRecFN.scala:71:7] wire _signProd_T_1 = 1'h0; // @[MulAddRecFN.scala:97:49] wire _doSubMags_T_1 = 1'h0; // @[MulAddRecFN.scala:102:49] wire [1:0] io_op = 2'h0; // @[MulAddRecFN.scala:71:7, :74:16] wire [47:0] _io_mulAddC_T; // @[MulAddRecFN.scala:143:30] wire _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:146:58] wire _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:148:42] wire rawA_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawA_isZero; // @[rawFloatFromRecFN.scala:55:23] wire rawB_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawB_isZero; // @[rawFloatFromRecFN.scala:55:23] wire signProd; // @[MulAddRecFN.scala:97:42] wire rawC_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawC_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawC_isZero; // @[rawFloatFromRecFN.scala:55:23] wire doSubMags; // @[MulAddRecFN.scala:102:42] wire CIsDominant; // @[MulAddRecFN.scala:110:23] wire [4:0] _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:161:47] wire [25:0] _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:163:20] wire _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:164:48] wire io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isInfB_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isZeroB_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isNaNC_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isInfC_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_isZeroC_0; // @[MulAddRecFN.scala:71:7] wire [9:0] io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7] wire io_toPostMul_CIsDominant_0; // @[MulAddRecFN.scala:71:7] wire [4:0] io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7] wire [25:0] io_toPostMul_highAlignedSigC_0; // 
@[MulAddRecFN.scala:71:7] wire io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7] wire [23:0] io_mulAddA_0; // @[MulAddRecFN.scala:71:7] wire [23:0] io_mulAddB_0; // @[MulAddRecFN.scala:71:7] wire [47:0] io_mulAddC_0; // @[MulAddRecFN.scala:71:7] wire [8:0] rawA_exp = io_a_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawA_isZero_T = rawA_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawA_isZero_0 = _rawA_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] assign rawA_isZero = rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawA_isSpecial_T = rawA_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawA_isSpecial = &_rawA_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] assign io_toPostMul_isInfA_0 = rawA_isInf; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_isZeroA_0 = rawA_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawA_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawA_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] rawA_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] rawA_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawA_out_isNaN_T = rawA_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawA_out_isInf_T = rawA_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawA_out_isNaN_T_1 = rawA_isSpecial & _rawA_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawA_isNaN = _rawA_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawA_out_isInf_T_1 = ~_rawA_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawA_out_isInf_T_2 = rawA_isSpecial & _rawA_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawA_isInf = _rawA_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawA_out_sign_T = io_a_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign rawA_sign = _rawA_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawA_out_sExp_T = {1'h0, rawA_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawA_sExp = _rawA_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawA_out_sig_T = ~rawA_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawA_out_sig_T_1 = {1'h0, _rawA_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _rawA_out_sig_T_2 = io_a_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawA_out_sig_T_3 = {_rawA_out_sig_T_1, _rawA_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawA_sig = _rawA_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire [8:0] rawB_exp = io_b_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawB_isZero_T = rawB_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawB_isZero_0 = _rawB_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] assign rawB_isZero = rawB_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawB_isSpecial_T = rawB_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawB_isSpecial = &_rawB_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawB_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawB_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] assign io_toPostMul_isInfB_0 = 
rawB_isInf; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_isZeroB_0 = rawB_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _rawB_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _rawB_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _rawB_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawB_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawB_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] rawB_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] rawB_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawB_out_isNaN_T = rawB_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawB_out_isInf_T = rawB_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawB_out_isNaN_T_1 = rawB_isSpecial & _rawB_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawB_isNaN = _rawB_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawB_out_isInf_T_1 = ~_rawB_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawB_out_isInf_T_2 = rawB_isSpecial & _rawB_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawB_isInf = _rawB_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawB_out_sign_T = io_b_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign rawB_sign = _rawB_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawB_out_sExp_T = {1'h0, rawB_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawB_sExp = _rawB_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawB_out_sig_T = ~rawB_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawB_out_sig_T_1 = {1'h0, _rawB_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _rawB_out_sig_T_2 = io_b_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawB_out_sig_T_3 = {_rawB_out_sig_T_1, _rawB_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawB_sig = _rawB_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire [8:0] rawC_exp = io_c_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawC_isZero_T = rawC_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawC_isZero_0 = _rawC_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] assign rawC_isZero = rawC_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawC_isSpecial_T = rawC_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawC_isSpecial = &_rawC_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawC_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] assign io_toPostMul_isNaNC_0 = rawC_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire _rawC_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] assign io_toPostMul_isInfC_0 = rawC_isInf; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_isZeroC_0 = rawC_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _rawC_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _rawC_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _rawC_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawC_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] rawC_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] rawC_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawC_out_isNaN_T = rawC_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawC_out_isInf_T = rawC_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawC_out_isNaN_T_1 = rawC_isSpecial & _rawC_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawC_isNaN = _rawC_out_isNaN_T_1; // 
@[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawC_out_isInf_T_1 = ~_rawC_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawC_out_isInf_T_2 = rawC_isSpecial & _rawC_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawC_isInf = _rawC_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawC_out_sign_T = io_c_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign rawC_sign = _rawC_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawC_out_sExp_T = {1'h0, rawC_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawC_sExp = _rawC_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawC_out_sig_T = ~rawC_isZero_0; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawC_out_sig_T_1 = {1'h0, _rawC_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _rawC_out_sig_T_2 = io_c_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawC_out_sig_T_3 = {_rawC_out_sig_T_1, _rawC_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawC_sig = _rawC_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire _signProd_T = rawA_sign ^ rawB_sign; // @[rawFloatFromRecFN.scala:55:23] assign signProd = _signProd_T; // @[MulAddRecFN.scala:97:{30,42}] assign io_toPostMul_signProd_0 = signProd; // @[MulAddRecFN.scala:71:7, :97:42] wire [10:0] _sExpAlignedProd_T = {rawA_sExp[9], rawA_sExp} + {rawB_sExp[9], rawB_sExp}; // @[rawFloatFromRecFN.scala:55:23] wire [11:0] _sExpAlignedProd_T_1 = {_sExpAlignedProd_T[10], _sExpAlignedProd_T} - 12'hE5; // @[MulAddRecFN.scala:100:{19,32}] wire [10:0] _sExpAlignedProd_T_2 = _sExpAlignedProd_T_1[10:0]; // @[MulAddRecFN.scala:100:32] wire [10:0] sExpAlignedProd = _sExpAlignedProd_T_2; // @[MulAddRecFN.scala:100:32] wire _doSubMags_T = signProd ^ rawC_sign; // @[rawFloatFromRecFN.scala:55:23] assign doSubMags = _doSubMags_T; // @[MulAddRecFN.scala:102:{30,42}] assign io_toPostMul_doSubMags_0 = doSubMags; // @[MulAddRecFN.scala:71:7, :102:42] wire [11:0] _GEN = {sExpAlignedProd[10], sExpAlignedProd}; // @[MulAddRecFN.scala:100:32, :106:42] wire [11:0] _sNatCAlignDist_T = _GEN - {{2{rawC_sExp[9]}}, rawC_sExp}; // @[rawFloatFromRecFN.scala:55:23] wire [10:0] _sNatCAlignDist_T_1 = _sNatCAlignDist_T[10:0]; // @[MulAddRecFN.scala:106:42] wire [10:0] sNatCAlignDist = _sNatCAlignDist_T_1; // @[MulAddRecFN.scala:106:42] wire [9:0] posNatCAlignDist = sNatCAlignDist[9:0]; // @[MulAddRecFN.scala:106:42, :107:42] wire _isMinCAlign_T = rawA_isZero | rawB_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _isMinCAlign_T_1 = $signed(sNatCAlignDist) < 11'sh0; // @[MulAddRecFN.scala:106:42, :108:69] wire isMinCAlign = _isMinCAlign_T | _isMinCAlign_T_1; // @[MulAddRecFN.scala:108:{35,50,69}] wire _CIsDominant_T = ~rawC_isZero; // @[rawFloatFromRecFN.scala:55:23] wire _CIsDominant_T_1 = posNatCAlignDist < 10'h19; // @[MulAddRecFN.scala:107:42, :110:60] wire _CIsDominant_T_2 = isMinCAlign | _CIsDominant_T_1; // @[MulAddRecFN.scala:108:50, :110:{39,60}] assign CIsDominant = _CIsDominant_T & _CIsDominant_T_2; // @[MulAddRecFN.scala:110:{9,23,39}] assign io_toPostMul_CIsDominant_0 = CIsDominant; // @[MulAddRecFN.scala:71:7, :110:23] wire _CAlignDist_T = posNatCAlignDist < 10'h4A; // @[MulAddRecFN.scala:107:42, :114:34] wire [6:0] _CAlignDist_T_1 = posNatCAlignDist[6:0]; // @[MulAddRecFN.scala:107:42, :115:33] wire [6:0] _CAlignDist_T_2 = _CAlignDist_T ? _CAlignDist_T_1 : 7'h4A; // @[MulAddRecFN.scala:114:{16,34}, :115:33] wire [6:0] CAlignDist = isMinCAlign ? 
7'h0 : _CAlignDist_T_2; // @[MulAddRecFN.scala:108:50, :112:12, :114:16] wire [24:0] _mainAlignedSigC_T = ~rawC_sig; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] _mainAlignedSigC_T_1 = doSubMags ? _mainAlignedSigC_T : rawC_sig; // @[rawFloatFromRecFN.scala:55:23] wire [52:0] _mainAlignedSigC_T_2 = {53{doSubMags}}; // @[MulAddRecFN.scala:102:42, :120:53] wire [77:0] _mainAlignedSigC_T_3 = {_mainAlignedSigC_T_1, _mainAlignedSigC_T_2}; // @[MulAddRecFN.scala:120:{13,46,53}] wire [77:0] _mainAlignedSigC_T_4 = _mainAlignedSigC_T_3; // @[MulAddRecFN.scala:120:{46,94}] wire [77:0] mainAlignedSigC = $signed($signed(_mainAlignedSigC_T_4) >>> CAlignDist); // @[MulAddRecFN.scala:112:12, :120:{94,100}] wire [26:0] _reduced4CExtra_T = {rawC_sig, 2'h0}; // @[rawFloatFromRecFN.scala:55:23] wire _reduced4CExtra_reducedVec_0_T_1; // @[primitives.scala:120:54] wire _reduced4CExtra_reducedVec_1_T_1; // @[primitives.scala:120:54] wire _reduced4CExtra_reducedVec_2_T_1; // @[primitives.scala:120:54] wire _reduced4CExtra_reducedVec_3_T_1; // @[primitives.scala:120:54] wire _reduced4CExtra_reducedVec_4_T_1; // @[primitives.scala:120:54] wire _reduced4CExtra_reducedVec_5_T_1; // @[primitives.scala:120:54] wire _reduced4CExtra_reducedVec_6_T_1; // @[primitives.scala:123:57] wire reduced4CExtra_reducedVec_0; // @[primitives.scala:118:30] wire reduced4CExtra_reducedVec_1; // @[primitives.scala:118:30] wire reduced4CExtra_reducedVec_2; // @[primitives.scala:118:30] wire reduced4CExtra_reducedVec_3; // @[primitives.scala:118:30] wire reduced4CExtra_reducedVec_4; // @[primitives.scala:118:30] wire reduced4CExtra_reducedVec_5; // @[primitives.scala:118:30] wire reduced4CExtra_reducedVec_6; // @[primitives.scala:118:30] wire [3:0] _reduced4CExtra_reducedVec_0_T = _reduced4CExtra_T[3:0]; // @[primitives.scala:120:33] assign _reduced4CExtra_reducedVec_0_T_1 = |_reduced4CExtra_reducedVec_0_T; // @[primitives.scala:120:{33,54}] assign reduced4CExtra_reducedVec_0 = _reduced4CExtra_reducedVec_0_T_1; // @[primitives.scala:118:30, :120:54] wire [3:0] _reduced4CExtra_reducedVec_1_T = _reduced4CExtra_T[7:4]; // @[primitives.scala:120:33] assign _reduced4CExtra_reducedVec_1_T_1 = |_reduced4CExtra_reducedVec_1_T; // @[primitives.scala:120:{33,54}] assign reduced4CExtra_reducedVec_1 = _reduced4CExtra_reducedVec_1_T_1; // @[primitives.scala:118:30, :120:54] wire [3:0] _reduced4CExtra_reducedVec_2_T = _reduced4CExtra_T[11:8]; // @[primitives.scala:120:33] assign _reduced4CExtra_reducedVec_2_T_1 = |_reduced4CExtra_reducedVec_2_T; // @[primitives.scala:120:{33,54}] assign reduced4CExtra_reducedVec_2 = _reduced4CExtra_reducedVec_2_T_1; // @[primitives.scala:118:30, :120:54] wire [3:0] _reduced4CExtra_reducedVec_3_T = _reduced4CExtra_T[15:12]; // @[primitives.scala:120:33] assign _reduced4CExtra_reducedVec_3_T_1 = |_reduced4CExtra_reducedVec_3_T; // @[primitives.scala:120:{33,54}] assign reduced4CExtra_reducedVec_3 = _reduced4CExtra_reducedVec_3_T_1; // @[primitives.scala:118:30, :120:54] wire [3:0] _reduced4CExtra_reducedVec_4_T = _reduced4CExtra_T[19:16]; // @[primitives.scala:120:33] assign _reduced4CExtra_reducedVec_4_T_1 = |_reduced4CExtra_reducedVec_4_T; // @[primitives.scala:120:{33,54}] assign reduced4CExtra_reducedVec_4 = _reduced4CExtra_reducedVec_4_T_1; // @[primitives.scala:118:30, :120:54] wire [3:0] _reduced4CExtra_reducedVec_5_T = _reduced4CExtra_T[23:20]; // @[primitives.scala:120:33] assign _reduced4CExtra_reducedVec_5_T_1 = |_reduced4CExtra_reducedVec_5_T; // @[primitives.scala:120:{33,54}] assign 
reduced4CExtra_reducedVec_5 = _reduced4CExtra_reducedVec_5_T_1; // @[primitives.scala:118:30, :120:54] wire [2:0] _reduced4CExtra_reducedVec_6_T = _reduced4CExtra_T[26:24]; // @[primitives.scala:123:15] assign _reduced4CExtra_reducedVec_6_T_1 = |_reduced4CExtra_reducedVec_6_T; // @[primitives.scala:123:{15,57}] assign reduced4CExtra_reducedVec_6 = _reduced4CExtra_reducedVec_6_T_1; // @[primitives.scala:118:30, :123:57] wire [1:0] reduced4CExtra_lo_hi = {reduced4CExtra_reducedVec_2, reduced4CExtra_reducedVec_1}; // @[primitives.scala:118:30, :124:20] wire [2:0] reduced4CExtra_lo = {reduced4CExtra_lo_hi, reduced4CExtra_reducedVec_0}; // @[primitives.scala:118:30, :124:20] wire [1:0] reduced4CExtra_hi_lo = {reduced4CExtra_reducedVec_4, reduced4CExtra_reducedVec_3}; // @[primitives.scala:118:30, :124:20] wire [1:0] reduced4CExtra_hi_hi = {reduced4CExtra_reducedVec_6, reduced4CExtra_reducedVec_5}; // @[primitives.scala:118:30, :124:20] wire [3:0] reduced4CExtra_hi = {reduced4CExtra_hi_hi, reduced4CExtra_hi_lo}; // @[primitives.scala:124:20] wire [6:0] _reduced4CExtra_T_1 = {reduced4CExtra_hi, reduced4CExtra_lo}; // @[primitives.scala:124:20] wire [4:0] _reduced4CExtra_T_2 = CAlignDist[6:2]; // @[MulAddRecFN.scala:112:12, :124:28] wire [32:0] reduced4CExtra_shift = $signed(33'sh100000000 >>> _reduced4CExtra_T_2); // @[primitives.scala:76:56] wire [5:0] _reduced4CExtra_T_3 = reduced4CExtra_shift[19:14]; // @[primitives.scala:76:56, :78:22] wire [3:0] _reduced4CExtra_T_4 = _reduced4CExtra_T_3[3:0]; // @[primitives.scala:77:20, :78:22] wire [1:0] _reduced4CExtra_T_5 = _reduced4CExtra_T_4[1:0]; // @[primitives.scala:77:20] wire _reduced4CExtra_T_6 = _reduced4CExtra_T_5[0]; // @[primitives.scala:77:20] wire _reduced4CExtra_T_7 = _reduced4CExtra_T_5[1]; // @[primitives.scala:77:20] wire [1:0] _reduced4CExtra_T_8 = {_reduced4CExtra_T_6, _reduced4CExtra_T_7}; // @[primitives.scala:77:20] wire [1:0] _reduced4CExtra_T_9 = _reduced4CExtra_T_4[3:2]; // @[primitives.scala:77:20] wire _reduced4CExtra_T_10 = _reduced4CExtra_T_9[0]; // @[primitives.scala:77:20] wire _reduced4CExtra_T_11 = _reduced4CExtra_T_9[1]; // @[primitives.scala:77:20] wire [1:0] _reduced4CExtra_T_12 = {_reduced4CExtra_T_10, _reduced4CExtra_T_11}; // @[primitives.scala:77:20] wire [3:0] _reduced4CExtra_T_13 = {_reduced4CExtra_T_8, _reduced4CExtra_T_12}; // @[primitives.scala:77:20] wire [1:0] _reduced4CExtra_T_14 = _reduced4CExtra_T_3[5:4]; // @[primitives.scala:77:20, :78:22] wire _reduced4CExtra_T_15 = _reduced4CExtra_T_14[0]; // @[primitives.scala:77:20] wire _reduced4CExtra_T_16 = _reduced4CExtra_T_14[1]; // @[primitives.scala:77:20] wire [1:0] _reduced4CExtra_T_17 = {_reduced4CExtra_T_15, _reduced4CExtra_T_16}; // @[primitives.scala:77:20] wire [5:0] _reduced4CExtra_T_18 = {_reduced4CExtra_T_13, _reduced4CExtra_T_17}; // @[primitives.scala:77:20] wire [6:0] _reduced4CExtra_T_19 = {1'h0, _reduced4CExtra_T_1[5:0] & _reduced4CExtra_T_18}; // @[primitives.scala:77:20, :124:20] wire reduced4CExtra = |_reduced4CExtra_T_19; // @[MulAddRecFN.scala:122:68, :130:11] wire [74:0] _alignedSigC_T = mainAlignedSigC[77:3]; // @[MulAddRecFN.scala:120:100, :132:28] wire [74:0] alignedSigC_hi = _alignedSigC_T; // @[MulAddRecFN.scala:132:{12,28}] wire [2:0] _alignedSigC_T_1 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32] wire [2:0] _alignedSigC_T_5 = mainAlignedSigC[2:0]; // @[MulAddRecFN.scala:120:100, :134:32, :135:32] wire _alignedSigC_T_2 = &_alignedSigC_T_1; // @[MulAddRecFN.scala:134:{32,39}] wire _alignedSigC_T_3 = 
~reduced4CExtra; // @[MulAddRecFN.scala:130:11, :134:47] wire _alignedSigC_T_4 = _alignedSigC_T_2 & _alignedSigC_T_3; // @[MulAddRecFN.scala:134:{39,44,47}] wire _alignedSigC_T_6 = |_alignedSigC_T_5; // @[MulAddRecFN.scala:135:{32,39}] wire _alignedSigC_T_7 = _alignedSigC_T_6 | reduced4CExtra; // @[MulAddRecFN.scala:130:11, :135:{39,44}] wire _alignedSigC_T_8 = doSubMags ? _alignedSigC_T_4 : _alignedSigC_T_7; // @[MulAddRecFN.scala:102:42, :133:16, :134:44, :135:44] wire [75:0] alignedSigC = {alignedSigC_hi, _alignedSigC_T_8}; // @[MulAddRecFN.scala:132:12, :133:16] assign io_mulAddA_0 = rawA_sig[23:0]; // @[rawFloatFromRecFN.scala:55:23] assign io_mulAddB_0 = rawB_sig[23:0]; // @[rawFloatFromRecFN.scala:55:23] assign _io_mulAddC_T = alignedSigC[48:1]; // @[MulAddRecFN.scala:132:12, :143:30] assign io_mulAddC_0 = _io_mulAddC_T; // @[MulAddRecFN.scala:71:7, :143:30] wire _io_toPostMul_isSigNaNAny_T = rawA_sig[22]; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_1 = ~_io_toPostMul_isSigNaNAny_T; // @[common.scala:82:{49,56}] wire _io_toPostMul_isSigNaNAny_T_2 = rawA_isNaN & _io_toPostMul_isSigNaNAny_T_1; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_3 = rawB_sig[22]; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_4 = ~_io_toPostMul_isSigNaNAny_T_3; // @[common.scala:82:{49,56}] wire _io_toPostMul_isSigNaNAny_T_5 = rawB_isNaN & _io_toPostMul_isSigNaNAny_T_4; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_6 = _io_toPostMul_isSigNaNAny_T_2 | _io_toPostMul_isSigNaNAny_T_5; // @[common.scala:82:46] wire _io_toPostMul_isSigNaNAny_T_7 = rawC_sig[22]; // @[rawFloatFromRecFN.scala:55:23] wire _io_toPostMul_isSigNaNAny_T_8 = ~_io_toPostMul_isSigNaNAny_T_7; // @[common.scala:82:{49,56}] wire _io_toPostMul_isSigNaNAny_T_9 = rawC_isNaN & _io_toPostMul_isSigNaNAny_T_8; // @[rawFloatFromRecFN.scala:55:23] assign _io_toPostMul_isSigNaNAny_T_10 = _io_toPostMul_isSigNaNAny_T_6 | _io_toPostMul_isSigNaNAny_T_9; // @[common.scala:82:46] assign io_toPostMul_isSigNaNAny_0 = _io_toPostMul_isSigNaNAny_T_10; // @[MulAddRecFN.scala:71:7, :146:58] assign _io_toPostMul_isNaNAOrB_T = rawA_isNaN | rawB_isNaN; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_isNaNAOrB_0 = _io_toPostMul_isNaNAOrB_T; // @[MulAddRecFN.scala:71:7, :148:42] wire [11:0] _io_toPostMul_sExpSum_T = _GEN - 12'h18; // @[MulAddRecFN.scala:106:42, :158:53] wire [10:0] _io_toPostMul_sExpSum_T_1 = _io_toPostMul_sExpSum_T[10:0]; // @[MulAddRecFN.scala:158:53] wire [10:0] _io_toPostMul_sExpSum_T_2 = _io_toPostMul_sExpSum_T_1; // @[MulAddRecFN.scala:158:53] wire [10:0] _io_toPostMul_sExpSum_T_3 = CIsDominant ? 
{rawC_sExp[9], rawC_sExp} : _io_toPostMul_sExpSum_T_2; // @[rawFloatFromRecFN.scala:55:23] assign io_toPostMul_sExpSum_0 = _io_toPostMul_sExpSum_T_3[9:0]; // @[MulAddRecFN.scala:71:7, :157:28, :158:12] assign _io_toPostMul_CDom_CAlignDist_T = CAlignDist[4:0]; // @[MulAddRecFN.scala:112:12, :161:47] assign io_toPostMul_CDom_CAlignDist_0 = _io_toPostMul_CDom_CAlignDist_T; // @[MulAddRecFN.scala:71:7, :161:47] assign _io_toPostMul_highAlignedSigC_T = alignedSigC[74:49]; // @[MulAddRecFN.scala:132:12, :163:20] assign io_toPostMul_highAlignedSigC_0 = _io_toPostMul_highAlignedSigC_T; // @[MulAddRecFN.scala:71:7, :163:20] assign _io_toPostMul_bit0AlignedSigC_T = alignedSigC[0]; // @[MulAddRecFN.scala:132:12, :164:48] assign io_toPostMul_bit0AlignedSigC_0 = _io_toPostMul_bit0AlignedSigC_T; // @[MulAddRecFN.scala:71:7, :164:48] assign io_mulAddA = io_mulAddA_0; // @[MulAddRecFN.scala:71:7] assign io_mulAddB = io_mulAddB_0; // @[MulAddRecFN.scala:71:7] assign io_mulAddC = io_mulAddC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isSigNaNAny = io_toPostMul_isSigNaNAny_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isNaNAOrB = io_toPostMul_isNaNAOrB_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isInfA = io_toPostMul_isInfA_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isZeroA = io_toPostMul_isZeroA_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isInfB = io_toPostMul_isInfB_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isZeroB = io_toPostMul_isZeroB_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_signProd = io_toPostMul_signProd_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isNaNC = io_toPostMul_isNaNC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isInfC = io_toPostMul_isInfC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_isZeroC = io_toPostMul_isZeroC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_sExpSum = io_toPostMul_sExpSum_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_doSubMags = io_toPostMul_doSubMags_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_CIsDominant = io_toPostMul_CIsDominant_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_CDom_CAlignDist = io_toPostMul_CDom_CAlignDist_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_highAlignedSigC = io_toPostMul_highAlignedSigC_0; // @[MulAddRecFN.scala:71:7] assign io_toPostMul_bit0AlignedSigC = io_toPostMul_bit0AlignedSigC_0; // @[MulAddRecFN.scala:71:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftRegisterPriorityQueue.scala: package compressacc import chisel3._ import chisel3.util._ import chisel3.util._ // TODO : support enq & deq at the same cycle class PriorityQueueStageIO(keyWidth: Int, value: ValueInfo) extends Bundle { val output_prev = KeyValue(keyWidth, value) val output_nxt = KeyValue(keyWidth, value) val input_prev = Flipped(KeyValue(keyWidth, value)) val input_nxt = Flipped(KeyValue(keyWidth, value)) val cmd = Flipped(Valid(UInt(1.W))) val insert_here = Input(Bool()) val cur_input_keyval = Flipped(KeyValue(keyWidth, value)) val cur_output_keyval = KeyValue(keyWidth, value) } class PriorityQueueStage(keyWidth: Int, value: ValueInfo) extends Module { val io = IO(new PriorityQueueStageIO(keyWidth, value)) dontTouch(io) val CMD_DEQ = 0.U val CMD_ENQ = 1.U val MAX_VALUE = (1 << keyWidth) - 1 val key_reg = RegInit(MAX_VALUE.U(keyWidth.W)) val value_reg = Reg(value) io.output_prev.key := key_reg io.output_prev.value := value_reg io.output_nxt.key := key_reg io.output_nxt.value := value_reg io.cur_output_keyval.key := key_reg io.cur_output_keyval.value := value_reg when (io.cmd.valid) { switch (io.cmd.bits) { is (CMD_DEQ) { key_reg := io.input_nxt.key value_reg := io.input_nxt.value } is (CMD_ENQ) { when (io.insert_here) { key_reg := io.cur_input_keyval.key value_reg := io.cur_input_keyval.value } .elsewhen (key_reg >= io.cur_input_keyval.key) { key_reg := io.input_prev.key value_reg := io.input_prev.value } .otherwise { // do nothing } } } } } object PriorityQueueStage { def apply(keyWidth: Int, v: ValueInfo): PriorityQueueStage = new PriorityQueueStage(keyWidth, v) } // TODO // - This design is not scalable as the enqued_keyval is broadcasted to all the stages // - Add pipeline registers later class PriorityQueueIO(queSize: Int, keyWidth: Int, value: ValueInfo) extends Bundle { val cnt_bits = log2Ceil(queSize+1) val counter = Output(UInt(cnt_bits.W)) val enq = Flipped(Decoupled(KeyValue(keyWidth, value))) val deq = Decoupled(KeyValue(keyWidth, value)) } class PriorityQueue(queSize: Int, keyWidth: Int, value: ValueInfo) extends Module { val keyWidthInternal = keyWidth + 1 val CMD_DEQ = 0.U val CMD_ENQ = 1.U val io = IO(new PriorityQueueIO(queSize, keyWidthInternal, value)) dontTouch(io) val MAX_VALUE = ((1 << keyWidthInternal) - 1).U val cnt_bits = log2Ceil(queSize+1) // do not consider cases where we are inserting more entries then the queSize val counter = RegInit(0.U(cnt_bits.W)) io.counter := counter val full = (counter === queSize.U) val empty = (counter === 0.U) io.deq.valid := !empty io.enq.ready := !full when (io.enq.fire) { counter := counter + 1.U } when (io.deq.fire) { counter := counter - 1.U } val cmd_valid = io.enq.valid || io.deq.ready val cmd = Mux(io.enq.valid, CMD_ENQ, CMD_DEQ) assert(!(io.enq.valid && io.deq.ready)) val stages = Seq.fill(queSize)(Module(new PriorityQueueStage(keyWidthInternal, value))) for (i <- 0 until (queSize - 1)) { stages(i+1).io.input_prev <> stages(i).io.output_nxt stages(i).io.input_nxt <> stages(i+1).io.output_prev } stages(queSize-1).io.input_nxt.key := MAX_VALUE // stages(queSize-1).io.input_nxt.value := stages(queSize-1).io.input_nxt.value.symbol := 0.U // stages(queSize-1).io.input_nxt.value.child(0) := 0.U // stages(queSize-1).io.input_nxt.value.child(1) := 0.U stages(0).io.input_prev.key := io.enq.bits.key stages(0).io.input_prev.value <> io.enq.bits.value for (i <- 0 until queSize) { stages(i).io.cmd.valid := cmd_valid stages(i).io.cmd.bits := cmd 
stages(i).io.cur_input_keyval <> io.enq.bits } val is_large_or_equal = WireInit(VecInit(Seq.fill(queSize)(false.B))) for (i <- 0 until queSize) { is_large_or_equal(i) := (stages(i).io.cur_output_keyval.key >= io.enq.bits.key) } val is_large_or_equal_cat = Wire(UInt(queSize.W)) is_large_or_equal_cat := Cat(is_large_or_equal.reverse) val insert_here_idx = PriorityEncoder(is_large_or_equal_cat) for (i <- 0 until queSize) { when (i.U === insert_here_idx) { stages(i).io.insert_here := true.B } .otherwise { stages(i).io.insert_here := false.B } } io.deq.bits <> stages(0).io.output_prev }
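Before the generated stage below, it may help to see how the queue itself is meant to be instantiated from a parent design. This is only a sketch: SymbolInfo and HuffmanQueueWrapper are hypothetical names (the concrete ValueInfo subtype is not shown in the file above), and keyWidth = 30 is chosen so that keyWidthInternal = 31 matches the 31-bit keys visible in the generated RTL:

import chisel3._
import chisel3.util._

// Hypothetical payload type: assumes ValueInfo is a Bundle that can be extended
// with a single 10-bit symbol field, mirroring value_symbol in the stage below.
class SymbolInfo extends ValueInfo {
  val symbol = UInt(10.W)
}

class HuffmanQueueWrapper extends Module {
  val q = Module(new PriorityQueue(queSize = 16, keyWidth = 30, value = new SymbolInfo))
  q.io.enq.valid             := false.B   // drive enq/deq from the surrounding datapath
  q.io.enq.bits.key          := 0.U
  q.io.enq.bits.value.symbol := 0.U
  q.io.deq.ready             := false.B
}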
module PriorityQueueStage_180( // @[ShiftRegisterPriorityQueue.scala:21:7] input clock, // @[ShiftRegisterPriorityQueue.scala:21:7] input reset, // @[ShiftRegisterPriorityQueue.scala:21:7] output [30:0] io_output_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_output_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_output_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_prev_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_prev_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_input_nxt_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_input_nxt_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_valid, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_cmd_bits, // @[ShiftRegisterPriorityQueue.scala:22:14] input io_insert_here, // @[ShiftRegisterPriorityQueue.scala:22:14] input [30:0] io_cur_input_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] input [9:0] io_cur_input_keyval_value_symbol, // @[ShiftRegisterPriorityQueue.scala:22:14] output [30:0] io_cur_output_keyval_key, // @[ShiftRegisterPriorityQueue.scala:22:14] output [9:0] io_cur_output_keyval_value_symbol // @[ShiftRegisterPriorityQueue.scala:22:14] ); wire [30:0] io_input_prev_key_0 = io_input_prev_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_prev_value_symbol_0 = io_input_prev_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_input_nxt_key_0 = io_input_nxt_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_input_nxt_value_symbol_0 = io_input_nxt_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_valid_0 = io_cmd_valid; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_cmd_bits_0 = io_cmd_bits; // @[ShiftRegisterPriorityQueue.scala:21:7] wire io_insert_here_0 = io_insert_here; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_input_keyval_key_0 = io_cur_input_keyval_key; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_input_keyval_value_symbol_0 = io_cur_input_keyval_value_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [9:0] io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7] wire [30:0] io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7] reg [30:0] key_reg; // @[ShiftRegisterPriorityQueue.scala:30:24] assign io_output_prev_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_output_nxt_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] assign io_cur_output_keyval_key_0 = key_reg; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24] reg [9:0] value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:31:22] assign io_output_prev_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_output_nxt_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] assign io_cur_output_keyval_value_symbol_0 = value_reg_symbol; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22] wire _T_2 = 
key_reg >= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24, :52:30]
  always @(posedge clock) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
    if (reset) // @[ShiftRegisterPriorityQueue.scala:21:7]
      key_reg <= 31'h7FFFFFFF; // @[ShiftRegisterPriorityQueue.scala:30:24]
    else if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
      if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
        if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
          key_reg <= io_cur_input_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
        else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
          key_reg <= io_input_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
      end else // @[ShiftRegisterPriorityQueue.scala:21:7]
        key_reg <= io_input_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :30:24]
    end
    if (io_cmd_valid_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
      if (io_cmd_bits_0) begin // @[ShiftRegisterPriorityQueue.scala:21:7]
        if (io_insert_here_0) // @[ShiftRegisterPriorityQueue.scala:21:7]
          value_reg_symbol <= io_cur_input_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
        else if (_T_2) // @[ShiftRegisterPriorityQueue.scala:52:30]
          value_reg_symbol <= io_input_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
      end else // @[ShiftRegisterPriorityQueue.scala:21:7]
        value_reg_symbol <= io_input_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7, :31:22]
    end
  end // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_output_prev_key = io_output_prev_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_output_prev_value_symbol = io_output_prev_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_output_nxt_key = io_output_nxt_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_output_nxt_value_symbol = io_output_nxt_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_cur_output_keyval_key = io_cur_output_keyval_key_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
  assign io_cur_output_keyval_value_symbol = io_cur_output_keyval_value_symbol_0; // @[ShiftRegisterPriorityQueue.scala:21:7]
endmodule
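For reference, the shift behaviour implemented by the stage above can be modelled in a few lines of plain Scala. This is only a software sketch of the mechanism, assuming the queue is not full so that an empty MAX_VALUE slot always exists to absorb the displaced tail:

// ENQ: the first stage whose key is >= the new key captures it (insert_here);
// every stage to its right takes its left neighbour's entry, so the tail shifts right.
def enqStep(keys: Seq[Int], newKey: Int): Seq[Int] = {
  val idx = keys.indexWhere(_ >= newKey)        // same role as the PriorityEncoder
  keys.patch(idx, Seq(newKey), 0).dropRight(1)  // insert newKey, drop the displaced MAX_VALUE slot
}

// DEQ: every stage takes its right neighbour's entry and the last stage refills
// with MAX_VALUE, exactly as input_nxt is wired up in PriorityQueue above.
def deqStep(keys: Seq[Int], maxValue: Int): Seq[Int] = keys.tail :+ maxValue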
Generate the Verilog code corresponding to the following Chisel files. File ClockDomain.scala: package freechips.rocketchip.prci import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing { def clockBundle: ClockBundle lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { childClock := clockBundle.clock childReset := clockBundle.reset override def provideImplicitClockToLazyChildren = true // these are just for backwards compatibility with external devices // that were manually wiring themselves to the domain's clock/reset input: val clock = IO(Output(chiselTypeOf(clockBundle.clock))) val reset = IO(Output(chiselTypeOf(clockBundle.reset))) clock := clockBundle.clock reset := clockBundle.reset } } abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain { def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name)) val clockNode = ClockSinkNode(Seq(clockSinkParams)) def clockBundle = clockNode.in.head._1 override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString } class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain { def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name)) val clockNode = ClockSourceNode(Seq(clockSourceParams)) def clockBundle = clockNode.out.head._1 override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString } abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. 
*/ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. 
* * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File NoC.scala: package constellation.noc import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy.{LazyModule, LazyModuleImp, BundleBridgeSink, InModuleBody} import freechips.rocketchip.util.ElaborationArtefacts import freechips.rocketchip.prci._ import constellation.router._ import constellation.channel._ import constellation.routing.{RoutingRelation, ChannelRoutingInfo} import constellation.topology.{PhysicalTopology, UnidirectionalLine} class NoCTerminalIO( val ingressParams: Seq[IngressChannelParams], val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle { val ingress = MixedVec(ingressParams.map { u => Flipped(new IngressChannel(u)) }) val egress = MixedVec(egressParams.map { u => new EgressChannel(u) }) } class NoC(nocParams: NoCParams)(implicit p: Parameters) extends LazyModule { override def shouldBeInlined = nocParams.inlineNoC val internalParams = InternalNoCParams(nocParams) val allChannelParams = internalParams.channelParams val allIngressParams = internalParams.ingressParams val allEgressParams = internalParams.egressParams val allRouterParams = internalParams.routerParams val iP = p.alterPartial({ case InternalNoCKey => internalParams }) val nNodes = nocParams.topology.nNodes val nocName = nocParams.nocName val skipValidationChecks = nocParams.skipValidationChecks val clockSourceNodes = Seq.tabulate(nNodes) { i => ClockSourceNode(Seq(ClockSourceParameters())) } val router_sink_domains = Seq.tabulate(nNodes) { i => val router_sink_domain = LazyModule(new ClockSinkDomain(ClockSinkParameters( name = Some(s"${nocName}_router_$i") ))) router_sink_domain.clockNode := clockSourceNodes(i) router_sink_domain } val routers = Seq.tabulate(nNodes) { i => router_sink_domains(i) { val inParams = allChannelParams.filter(_.destId == i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val outParams = allChannelParams.filter(_.srcId == i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val ingressParams = allIngressParams.filter(_.destId == i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val egressParams = allEgressParams.filter(_.srcId == 
i).map( _.copy(payloadBits=allRouterParams(i).user.payloadBits) ) val noIn = inParams.size + ingressParams.size == 0 val noOut = outParams.size + egressParams.size == 0 if (noIn || noOut) { println(s"Constellation WARNING: $nocName router $i seems to be unused, it will not be generated") None } else { Some(LazyModule(new Router( routerParams = allRouterParams(i), preDiplomaticInParams = inParams, preDiplomaticIngressParams = ingressParams, outDests = outParams.map(_.destId), egressIds = egressParams.map(_.egressId) )(iP))) } }}.flatten val ingressNodes = allIngressParams.map { u => IngressChannelSourceNode(u.destId) } val egressNodes = allEgressParams.map { u => EgressChannelDestNode(u) } // Generate channels between routers diplomatically Seq.tabulate(nNodes, nNodes) { case (i, j) => if (i != j) { val routerI = routers.find(_.nodeId == i) val routerJ = routers.find(_.nodeId == j) if (routerI.isDefined && routerJ.isDefined) { val sourceNodes: Seq[ChannelSourceNode] = routerI.get.sourceNodes.filter(_.destId == j) val destNodes: Seq[ChannelDestNode] = routerJ.get.destNodes.filter(_.destParams.srcId == i) require (sourceNodes.size == destNodes.size) (sourceNodes zip destNodes).foreach { case (src, dst) => val channelParam = allChannelParams.find(c => c.srcId == i && c.destId == j).get router_sink_domains(j) { implicit val p: Parameters = iP (dst := ChannelWidthWidget(routerJ.get.payloadBits, routerI.get.payloadBits) := channelParam.channelGen(p)(src) ) } } } }} // Generate terminal channels diplomatically routers.foreach { dst => router_sink_domains(dst.nodeId) { implicit val p: Parameters = iP dst.ingressNodes.foreach(n => { val ingressId = n.destParams.ingressId require(dst.payloadBits <= allIngressParams(ingressId).payloadBits) (n := IngressWidthWidget(dst.payloadBits, allIngressParams(ingressId).payloadBits) := ingressNodes(ingressId) ) }) dst.egressNodes.foreach(n => { val egressId = n.egressId require(dst.payloadBits <= allEgressParams(egressId).payloadBits) (egressNodes(egressId) := EgressWidthWidget(allEgressParams(egressId).payloadBits, dst.payloadBits) := n ) }) }} val debugNodes = routers.map { r => val sink = BundleBridgeSink[DebugBundle]() sink := r.debugNode sink } val ctrlNodes = if (nocParams.hasCtrl) { (0 until nNodes).map { i => routers.find(_.nodeId == i).map { r => val sink = BundleBridgeSink[RouterCtrlBundle]() sink := r.ctrlNode.get sink } } } else { Nil } println(s"Constellation: $nocName Finished parameter validation") lazy val module = new Impl class Impl extends LazyModuleImp(this) { println(s"Constellation: $nocName Starting NoC RTL generation") val io = IO(new NoCTerminalIO(allIngressParams, allEgressParams)(iP) { val router_clocks = Vec(nNodes, Input(new ClockBundle(ClockBundleParameters()))) val router_ctrl = if (nocParams.hasCtrl) Vec(nNodes, new RouterCtrlBundle) else Nil }) (io.ingress zip ingressNodes.map(_.out(0)._1)).foreach { case (l,r) => r <> l } (io.egress zip egressNodes .map(_.in (0)._1)).foreach { case (l,r) => l <> r } (io.router_clocks zip clockSourceNodes.map(_.out(0)._1)).foreach { case (l,r) => l <> r } if (nocParams.hasCtrl) { ctrlNodes.zipWithIndex.map { case (c,i) => if (c.isDefined) { io.router_ctrl(i) <> c.get.in(0)._1 } else { io.router_ctrl(i) <> DontCare } } } // TODO: These assume a single clock-domain across the entire noc val debug_va_stall_ctr = RegInit(0.U(64.W)) val debug_sa_stall_ctr = RegInit(0.U(64.W)) val debug_any_stall_ctr = debug_va_stall_ctr + debug_sa_stall_ctr debug_va_stall_ctr := debug_va_stall_ctr + 
debugNodes.map(_.in(0)._1.va_stall.reduce(_+_)).reduce(_+_) debug_sa_stall_ctr := debug_sa_stall_ctr + debugNodes.map(_.in(0)._1.sa_stall.reduce(_+_)).reduce(_+_) dontTouch(debug_va_stall_ctr) dontTouch(debug_sa_stall_ctr) dontTouch(debug_any_stall_ctr) def prepend(s: String) = Seq(nocName, s).mkString(".") ElaborationArtefacts.add(prepend("noc.graphml"), graphML) val adjList = routers.map { r => val outs = r.outParams.map(o => s"${o.destId}").mkString(" ") val egresses = r.egressParams.map(e => s"e${e.egressId}").mkString(" ") val ingresses = r.ingressParams.map(i => s"i${i.ingressId} ${r.nodeId}") (Seq(s"${r.nodeId} $outs $egresses") ++ ingresses).mkString("\n") }.mkString("\n") ElaborationArtefacts.add(prepend("noc.adjlist"), adjList) val xys = routers.map(r => { val n = r.nodeId val ids = (Seq(r.nodeId.toString) ++ r.egressParams.map(e => s"e${e.egressId}") ++ r.ingressParams.map(i => s"i${i.ingressId}") ) val plotter = nocParams.topology.plotter val coords = (Seq(plotter.node(r.nodeId)) ++ Seq.tabulate(r.egressParams.size ) { i => plotter. egress(i, r. egressParams.size, r.nodeId) } ++ Seq.tabulate(r.ingressParams.size) { i => plotter.ingress(i, r.ingressParams.size, r.nodeId) } ) (ids zip coords).map { case (i, (x, y)) => s"$i $x $y" }.mkString("\n") }).mkString("\n") ElaborationArtefacts.add(prepend("noc.xy"), xys) val edgeProps = routers.map { r => val outs = r.outParams.map { o => (Seq(s"${r.nodeId} ${o.destId}") ++ (if (o.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } val egresses = r.egressParams.map { e => (Seq(s"${r.nodeId} e${e.egressId}") ++ (if (e.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } val ingresses = r.ingressParams.map { i => (Seq(s"i${i.ingressId} ${r.nodeId}") ++ (if (i.possibleFlows.size == 0) Some("unused") else None)) .mkString(" ") } (outs ++ egresses ++ ingresses).mkString("\n") }.mkString("\n") ElaborationArtefacts.add(prepend("noc.edgeprops"), edgeProps) println(s"Constellation: $nocName Finished NoC RTL generation") } }
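The generated wrapper below is one instance of the per-router clock-domain pattern set up in the NoC above. As a reminder of what that pattern looks like at the point of use, here is a hypothetical sketch (MyRouter and the enclosing LazyModule with an implicit Parameters in scope are assumptions, not part of the files above):

// Give a child LazyModule its own diplomatically-clocked domain, as NoC does per router.
val clockSource = ClockSourceNode(Seq(ClockSourceParameters()))
val domain = LazyModule(new ClockSinkDomain(ClockSinkParameters(name = Some("myrouter_domain"))))
domain.clockNode := clockSource
// Everything elaborated inside the domain's scope is clocked by the ClockBundle that
// arrives at clockNode, matching the router_sink_domains(i) { ... } blocks above.
val myRouter = domain { LazyModule(new MyRouter) }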
module TLNoC_2_router_3ClockSinkDomain( // @[ClockDomain.scala:14:9] output [3:0] auto_routers_debug_out_va_stall_0, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_debug_out_va_stall_2, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_debug_out_sa_stall_0, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_debug_out_sa_stall_2, // @[LazyModuleImp.scala:107:25] input auto_routers_egress_nodes_out_2_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_2_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_2_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_2_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input auto_routers_egress_nodes_out_1_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_1_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_1_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input auto_routers_egress_nodes_out_0_flit_ready, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_0_flit_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_0_flit_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_egress_nodes_out_0_flit_bits_tail, // @[LazyModuleImp.scala:107:25] output [72:0] auto_routers_egress_nodes_out_0_flit_bits_payload, // @[LazyModuleImp.scala:107:25] output auto_routers_ingress_nodes_in_1_flit_ready, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_ingress_nodes_in_1_flit_bits_tail, // @[LazyModuleImp.scala:107:25] input [72:0] auto_routers_ingress_nodes_in_1_flit_bits_payload, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_ingress_nodes_in_1_flit_bits_egress_id, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_flit_0_valid, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] output auto_routers_source_nodes_out_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] output [72:0] auto_routers_source_nodes_out_flit_0_bits_payload, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] output [1:0] auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] output [2:0] auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] output [3:0] auto_routers_source_nodes_out_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] input [9:0] auto_routers_source_nodes_out_credit_return, // @[LazyModuleImp.scala:107:25] input [9:0] auto_routers_source_nodes_out_vc_free, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_flit_0_valid, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_flit_0_bits_head, // @[LazyModuleImp.scala:107:25] input auto_routers_dest_nodes_in_flit_0_bits_tail, // @[LazyModuleImp.scala:107:25] input [72:0] auto_routers_dest_nodes_in_flit_0_bits_payload, // 
@[LazyModuleImp.scala:107:25] input [2:0] auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node, // @[LazyModuleImp.scala:107:25] input [1:0] auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node, // @[LazyModuleImp.scala:107:25] input [2:0] auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id, // @[LazyModuleImp.scala:107:25] input [3:0] auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id, // @[LazyModuleImp.scala:107:25] output [9:0] auto_routers_dest_nodes_in_credit_return, // @[LazyModuleImp.scala:107:25] output [9:0] auto_routers_dest_nodes_in_vc_free, // @[LazyModuleImp.scala:107:25] input auto_clock_in_clock, // @[LazyModuleImp.scala:107:25] input auto_clock_in_reset // @[LazyModuleImp.scala:107:25] ); Router_50 routers ( // @[NoC.scala:67:22] .clock (auto_clock_in_clock), .reset (auto_clock_in_reset), .auto_debug_out_va_stall_0 (auto_routers_debug_out_va_stall_0), .auto_debug_out_va_stall_2 (auto_routers_debug_out_va_stall_2), .auto_debug_out_sa_stall_0 (auto_routers_debug_out_sa_stall_0), .auto_debug_out_sa_stall_2 (auto_routers_debug_out_sa_stall_2), .auto_egress_nodes_out_2_flit_ready (auto_routers_egress_nodes_out_2_flit_ready), .auto_egress_nodes_out_2_flit_valid (auto_routers_egress_nodes_out_2_flit_valid), .auto_egress_nodes_out_2_flit_bits_head (auto_routers_egress_nodes_out_2_flit_bits_head), .auto_egress_nodes_out_2_flit_bits_tail (auto_routers_egress_nodes_out_2_flit_bits_tail), .auto_egress_nodes_out_1_flit_ready (auto_routers_egress_nodes_out_1_flit_ready), .auto_egress_nodes_out_1_flit_valid (auto_routers_egress_nodes_out_1_flit_valid), .auto_egress_nodes_out_1_flit_bits_head (auto_routers_egress_nodes_out_1_flit_bits_head), .auto_egress_nodes_out_1_flit_bits_tail (auto_routers_egress_nodes_out_1_flit_bits_tail), .auto_egress_nodes_out_0_flit_ready (auto_routers_egress_nodes_out_0_flit_ready), .auto_egress_nodes_out_0_flit_valid (auto_routers_egress_nodes_out_0_flit_valid), .auto_egress_nodes_out_0_flit_bits_head (auto_routers_egress_nodes_out_0_flit_bits_head), .auto_egress_nodes_out_0_flit_bits_tail (auto_routers_egress_nodes_out_0_flit_bits_tail), .auto_egress_nodes_out_0_flit_bits_payload (auto_routers_egress_nodes_out_0_flit_bits_payload), .auto_ingress_nodes_in_1_flit_ready (auto_routers_ingress_nodes_in_1_flit_ready), .auto_ingress_nodes_in_1_flit_valid (auto_routers_ingress_nodes_in_1_flit_valid), .auto_ingress_nodes_in_1_flit_bits_head (auto_routers_ingress_nodes_in_1_flit_bits_head), .auto_ingress_nodes_in_1_flit_bits_tail (auto_routers_ingress_nodes_in_1_flit_bits_tail), .auto_ingress_nodes_in_1_flit_bits_payload (auto_routers_ingress_nodes_in_1_flit_bits_payload), .auto_ingress_nodes_in_1_flit_bits_egress_id (auto_routers_ingress_nodes_in_1_flit_bits_egress_id), .auto_source_nodes_out_flit_0_valid (auto_routers_source_nodes_out_flit_0_valid), .auto_source_nodes_out_flit_0_bits_head (auto_routers_source_nodes_out_flit_0_bits_head), .auto_source_nodes_out_flit_0_bits_tail (auto_routers_source_nodes_out_flit_0_bits_tail), .auto_source_nodes_out_flit_0_bits_payload (auto_routers_source_nodes_out_flit_0_bits_payload), .auto_source_nodes_out_flit_0_bits_flow_vnet_id (auto_routers_source_nodes_out_flit_0_bits_flow_vnet_id), .auto_source_nodes_out_flit_0_bits_flow_ingress_node (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node), 
.auto_source_nodes_out_flit_0_bits_flow_ingress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_ingress_node_id), .auto_source_nodes_out_flit_0_bits_flow_egress_node (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node), .auto_source_nodes_out_flit_0_bits_flow_egress_node_id (auto_routers_source_nodes_out_flit_0_bits_flow_egress_node_id), .auto_source_nodes_out_flit_0_bits_virt_channel_id (auto_routers_source_nodes_out_flit_0_bits_virt_channel_id), .auto_source_nodes_out_credit_return (auto_routers_source_nodes_out_credit_return), .auto_source_nodes_out_vc_free (auto_routers_source_nodes_out_vc_free), .auto_dest_nodes_in_flit_0_valid (auto_routers_dest_nodes_in_flit_0_valid), .auto_dest_nodes_in_flit_0_bits_head (auto_routers_dest_nodes_in_flit_0_bits_head), .auto_dest_nodes_in_flit_0_bits_tail (auto_routers_dest_nodes_in_flit_0_bits_tail), .auto_dest_nodes_in_flit_0_bits_payload (auto_routers_dest_nodes_in_flit_0_bits_payload), .auto_dest_nodes_in_flit_0_bits_flow_vnet_id (auto_routers_dest_nodes_in_flit_0_bits_flow_vnet_id), .auto_dest_nodes_in_flit_0_bits_flow_ingress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node), .auto_dest_nodes_in_flit_0_bits_flow_ingress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_ingress_node_id), .auto_dest_nodes_in_flit_0_bits_flow_egress_node (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node), .auto_dest_nodes_in_flit_0_bits_flow_egress_node_id (auto_routers_dest_nodes_in_flit_0_bits_flow_egress_node_id), .auto_dest_nodes_in_flit_0_bits_virt_channel_id (auto_routers_dest_nodes_in_flit_0_bits_virt_channel_id), .auto_dest_nodes_in_credit_return (auto_routers_dest_nodes_in_credit_return), .auto_dest_nodes_in_vc_free (auto_routers_dest_nodes_in_vc_free) ); // @[NoC.scala:67:22] endmodule
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_70( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_80 io_out_source_extend ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
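AsyncValidSync_70 is one of the reset-handshake synchronizers created inside the AsyncQueue source/sink: it simply passes io_in through a 3-deep AsyncResetSynchronizerShiftReg, and the instance name io_out_source_extend indicates this particular one is the source_extend stage of AsyncQueueSink, carrying widx_valid into the sink domain. The data-path pointers that cross the same boundary are Gray-coded by GrayCounter (incremented ^ (incremented >> 1)), so consecutive pointer values differ in exactly one bit and a synchronizer can never capture a torn multi-bit value. A pure-Scala model of that conversion, for illustration only (not part of the generated hardware):

object GraySketch {
  // Same transformation GrayCounter applies to its binary register.
  def toGray(binary: Int): Int = binary ^ (binary >>> 1)

  def main(args: Array[String]): Unit = {
    // Adjacent codes differ in exactly one bit.
    for (i <- 0 until 15)
      assert(Integer.bitCount(toGray(i) ^ toGray(i + 1)) == 1)
    println((0 until 8).map(i => s"${i.toBinaryString} -> ${toGray(i).toBinaryString}"))
  }
}

In the queue itself, widx_gray and ridx_gray are the only data-path signals that cross domains, each through an AsyncResetSynchronizerShiftReg of the same 3-stage depth used by the valid handshakes above.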
Generate the Verilog code corresponding to the following Chisel files. File tage.scala: package boom.v3.ifu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import boom.v3.common._ import boom.v3.util.{BoomCoreStringPrefix, MaskLower, WrapInc} import scala.math.min class TageResp extends Bundle { val ctr = UInt(3.W) val u = UInt(2.W) } class TageTable(val nRows: Int, val tagSz: Int, val histLength: Int, val uBitPeriod: Int) (implicit p: Parameters) extends BoomModule()(p) with HasBoomFrontendParameters { require(histLength <= globalHistoryLength) val nWrBypassEntries = 2 val io = IO( new Bundle { val f1_req_valid = Input(Bool()) val f1_req_pc = Input(UInt(vaddrBitsExtended.W)) val f1_req_ghist = Input(UInt(globalHistoryLength.W)) val f3_resp = Output(Vec(bankWidth, Valid(new TageResp))) val update_mask = Input(Vec(bankWidth, Bool())) val update_taken = Input(Vec(bankWidth, Bool())) val update_alloc = Input(Vec(bankWidth, Bool())) val update_old_ctr = Input(Vec(bankWidth, UInt(3.W))) val update_pc = Input(UInt()) val update_hist = Input(UInt()) val update_u_mask = Input(Vec(bankWidth, Bool())) val update_u = Input(Vec(bankWidth, UInt(2.W))) }) def compute_folded_hist(hist: UInt, l: Int) = { val nChunks = (histLength + l - 1) / l val hist_chunks = (0 until nChunks) map {i => hist(min((i+1)*l, histLength)-1, i*l) } hist_chunks.reduce(_^_) } def compute_tag_and_hash(unhashed_idx: UInt, hist: UInt) = { val idx_history = compute_folded_hist(hist, log2Ceil(nRows)) val idx = (unhashed_idx ^ idx_history)(log2Ceil(nRows)-1,0) val tag_history = compute_folded_hist(hist, tagSz) val tag = ((unhashed_idx >> log2Ceil(nRows)) ^ tag_history)(tagSz-1,0) (idx, tag) } def inc_ctr(ctr: UInt, taken: Bool): UInt = { Mux(!taken, Mux(ctr === 0.U, 0.U, ctr - 1.U), Mux(ctr === 7.U, 7.U, ctr + 1.U)) } val doing_reset = RegInit(true.B) val reset_idx = RegInit(0.U(log2Ceil(nRows).W)) reset_idx := reset_idx + doing_reset when (reset_idx === (nRows-1).U) { doing_reset := false.B } class TageEntry extends Bundle { val valid = Bool() // TODO: Remove this valid bit val tag = UInt(tagSz.W) val ctr = UInt(3.W) } val tageEntrySz = 1 + tagSz + 3 val (s1_hashed_idx, s1_tag) = compute_tag_and_hash(fetchIdx(io.f1_req_pc), io.f1_req_ghist) val hi_us = SyncReadMem(nRows, Vec(bankWidth, Bool())) val lo_us = SyncReadMem(nRows, Vec(bankWidth, Bool())) val table = SyncReadMem(nRows, Vec(bankWidth, UInt(tageEntrySz.W))) val mems = Seq((f"tage_l$histLength", nRows, bankWidth * tageEntrySz)) val s2_tag = RegNext(s1_tag) val s2_req_rtage = VecInit(table.read(s1_hashed_idx, io.f1_req_valid).map(_.asTypeOf(new TageEntry))) val s2_req_rhius = hi_us.read(s1_hashed_idx, io.f1_req_valid) val s2_req_rlous = lo_us.read(s1_hashed_idx, io.f1_req_valid) val s2_req_rhits = VecInit(s2_req_rtage.map(e => e.valid && e.tag === s2_tag && !doing_reset)) for (w <- 0 until bankWidth) { // This bit indicates the TAGE table matched here io.f3_resp(w).valid := RegNext(s2_req_rhits(w)) io.f3_resp(w).bits.u := RegNext(Cat(s2_req_rhius(w), s2_req_rlous(w))) io.f3_resp(w).bits.ctr := RegNext(s2_req_rtage(w).ctr) } val clear_u_ctr = RegInit(0.U((log2Ceil(uBitPeriod) + log2Ceil(nRows) + 1).W)) when (doing_reset) { clear_u_ctr := 1.U } .otherwise { clear_u_ctr := clear_u_ctr + 1.U } val doing_clear_u = clear_u_ctr(log2Ceil(uBitPeriod)-1,0) === 0.U val doing_clear_u_hi = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 1.U 
val doing_clear_u_lo = doing_clear_u && clear_u_ctr(log2Ceil(uBitPeriod) + log2Ceil(nRows)) === 0.U val clear_u_idx = clear_u_ctr >> log2Ceil(uBitPeriod) val (update_idx, update_tag) = compute_tag_and_hash(fetchIdx(io.update_pc), io.update_hist) val update_wdata = Wire(Vec(bankWidth, new TageEntry)) table.write( Mux(doing_reset, reset_idx , update_idx), Mux(doing_reset, VecInit(Seq.fill(bankWidth) { 0.U(tageEntrySz.W) }), VecInit(update_wdata.map(_.asUInt))), Mux(doing_reset, ~(0.U(bankWidth.W)) , io.update_mask.asUInt).asBools ) val update_hi_wdata = Wire(Vec(bankWidth, Bool())) hi_us.write( Mux(doing_reset, reset_idx, Mux(doing_clear_u_hi, clear_u_idx, update_idx)), Mux(doing_reset || doing_clear_u_hi, VecInit((0.U(bankWidth.W)).asBools), update_hi_wdata), Mux(doing_reset || doing_clear_u_hi, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools ) val update_lo_wdata = Wire(Vec(bankWidth, Bool())) lo_us.write( Mux(doing_reset, reset_idx, Mux(doing_clear_u_lo, clear_u_idx, update_idx)), Mux(doing_reset || doing_clear_u_lo, VecInit((0.U(bankWidth.W)).asBools), update_lo_wdata), Mux(doing_reset || doing_clear_u_lo, ~(0.U(bankWidth.W)), io.update_u_mask.asUInt).asBools ) val wrbypass_tags = Reg(Vec(nWrBypassEntries, UInt(tagSz.W))) val wrbypass_idxs = Reg(Vec(nWrBypassEntries, UInt(log2Ceil(nRows).W))) val wrbypass = Reg(Vec(nWrBypassEntries, Vec(bankWidth, UInt(3.W)))) val wrbypass_enq_idx = RegInit(0.U(log2Ceil(nWrBypassEntries).W)) val wrbypass_hits = VecInit((0 until nWrBypassEntries) map { i => !doing_reset && wrbypass_tags(i) === update_tag && wrbypass_idxs(i) === update_idx }) val wrbypass_hit = wrbypass_hits.reduce(_||_) val wrbypass_hit_idx = PriorityEncoder(wrbypass_hits) for (w <- 0 until bankWidth) { update_wdata(w).ctr := Mux(io.update_alloc(w), Mux(io.update_taken(w), 4.U, 3.U ), Mux(wrbypass_hit, inc_ctr(wrbypass(wrbypass_hit_idx)(w), io.update_taken(w)), inc_ctr(io.update_old_ctr(w), io.update_taken(w)) ) ) update_wdata(w).valid := true.B update_wdata(w).tag := update_tag update_hi_wdata(w) := io.update_u(w)(1) update_lo_wdata(w) := io.update_u(w)(0) } when (io.update_mask.reduce(_||_)) { when (wrbypass_hits.reduce(_||_)) { wrbypass(wrbypass_hit_idx) := VecInit(update_wdata.map(_.ctr)) } .otherwise { wrbypass (wrbypass_enq_idx) := VecInit(update_wdata.map(_.ctr)) wrbypass_tags(wrbypass_enq_idx) := update_tag wrbypass_idxs(wrbypass_enq_idx) := update_idx wrbypass_enq_idx := WrapInc(wrbypass_enq_idx, nWrBypassEntries) } } } case class BoomTageParams( // nSets, histLen, tagSz tableInfo: Seq[Tuple3[Int, Int, Int]] = Seq(( 128, 2, 7), ( 128, 4, 7), ( 256, 8, 8), ( 256, 16, 8), ( 128, 32, 9), ( 128, 64, 9)), uBitPeriod: Int = 2048 ) class TageBranchPredictorBank(params: BoomTageParams = BoomTageParams())(implicit p: Parameters) extends BranchPredictorBank()(p) { val tageUBitPeriod = params.uBitPeriod val tageNTables = params.tableInfo.size class TageMeta extends Bundle { val provider = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) val alt_differs = Vec(bankWidth, Output(Bool())) val provider_u = Vec(bankWidth, Output(UInt(2.W))) val provider_ctr = Vec(bankWidth, Output(UInt(3.W))) val allocate = Vec(bankWidth, Valid(UInt(log2Ceil(tageNTables).W))) } val f3_meta = Wire(new TageMeta) override val metaSz = f3_meta.asUInt.getWidth require(metaSz <= bpdMaxMetaLength) def inc_u(u: UInt, alt_differs: Bool, mispredict: Bool): UInt = { Mux(!alt_differs, u, Mux(mispredict, Mux(u === 0.U, 0.U, u - 1.U), Mux(u === 3.U, 3.U, u + 1.U))) } val tt = params.tableInfo map { case (n, l, s) => 
{ val t = Module(new TageTable(n, s, l, params.uBitPeriod)) t.io.f1_req_valid := RegNext(io.f0_valid) t.io.f1_req_pc := RegNext(io.f0_pc) t.io.f1_req_ghist := io.f1_ghist (t, t.mems) } } val tables = tt.map(_._1) val mems = tt.map(_._2).flatten val f3_resps = VecInit(tables.map(_.io.f3_resp)) val s1_update_meta = s1_update.bits.meta.asTypeOf(new TageMeta) val s1_update_mispredict_mask = UIntToOH(s1_update.bits.cfi_idx.bits) & Fill(bankWidth, s1_update.bits.cfi_mispredicted) val s1_update_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, Bool())))) val s1_update_u_mask = WireInit((0.U).asTypeOf(Vec(tageNTables, Vec(bankWidth, UInt(1.W))))) val s1_update_taken = Wire(Vec(tageNTables, Vec(bankWidth, Bool()))) val s1_update_old_ctr = Wire(Vec(tageNTables, Vec(bankWidth, UInt(3.W)))) val s1_update_alloc = Wire(Vec(tageNTables, Vec(bankWidth, Bool()))) val s1_update_u = Wire(Vec(tageNTables, Vec(bankWidth, UInt(2.W)))) s1_update_taken := DontCare s1_update_old_ctr := DontCare s1_update_alloc := DontCare s1_update_u := DontCare for (w <- 0 until bankWidth) { var altpred = io.resp_in(0).f3(w).taken val final_altpred = WireInit(io.resp_in(0).f3(w).taken) var provided = false.B var provider = 0.U io.resp.f3(w).taken := io.resp_in(0).f3(w).taken for (i <- 0 until tageNTables) { val hit = f3_resps(i)(w).valid val ctr = f3_resps(i)(w).bits.ctr when (hit) { io.resp.f3(w).taken := Mux(ctr === 3.U || ctr === 4.U, altpred, ctr(2)) final_altpred := altpred } provided = provided || hit provider = Mux(hit, i.U, provider) altpred = Mux(hit, f3_resps(i)(w).bits.ctr(2), altpred) } f3_meta.provider(w).valid := provided f3_meta.provider(w).bits := provider f3_meta.alt_differs(w) := final_altpred =/= io.resp.f3(w).taken f3_meta.provider_u(w) := f3_resps(provider)(w).bits.u f3_meta.provider_ctr(w) := f3_resps(provider)(w).bits.ctr // Create a mask of tables which did not hit our query, and also contain useless entries // and also uses a longer history than the provider val allocatable_slots = ( VecInit(f3_resps.map(r => !r(w).valid && r(w).bits.u === 0.U)).asUInt & ~(MaskLower(UIntToOH(provider)) & Fill(tageNTables, provided)) ) val alloc_lfsr = random.LFSR(tageNTables max 2) val first_entry = PriorityEncoder(allocatable_slots) val masked_entry = PriorityEncoder(allocatable_slots & alloc_lfsr) val alloc_entry = Mux(allocatable_slots(masked_entry), masked_entry, first_entry) f3_meta.allocate(w).valid := allocatable_slots =/= 0.U f3_meta.allocate(w).bits := alloc_entry val update_was_taken = (s1_update.bits.cfi_idx.valid && (s1_update.bits.cfi_idx.bits === w.U) && s1_update.bits.cfi_taken) when (s1_update.bits.br_mask(w) && s1_update.valid && s1_update.bits.is_commit_update) { when (s1_update_meta.provider(w).valid) { val provider = s1_update_meta.provider(w).bits s1_update_mask(provider)(w) := true.B s1_update_u_mask(provider)(w) := true.B val new_u = inc_u(s1_update_meta.provider_u(w), s1_update_meta.alt_differs(w), s1_update_mispredict_mask(w)) s1_update_u (provider)(w) := new_u s1_update_taken (provider)(w) := update_was_taken s1_update_old_ctr(provider)(w) := s1_update_meta.provider_ctr(w) s1_update_alloc (provider)(w) := false.B } } } when (s1_update.valid && s1_update.bits.is_commit_update && s1_update.bits.cfi_mispredicted && s1_update.bits.cfi_idx.valid) { val idx = s1_update.bits.cfi_idx.bits val allocate = s1_update_meta.allocate(idx) when (allocate.valid) { s1_update_mask (allocate.bits)(idx) := true.B s1_update_taken(allocate.bits)(idx) := s1_update.bits.cfi_taken 
s1_update_alloc(allocate.bits)(idx) := true.B s1_update_u_mask(allocate.bits)(idx) := true.B s1_update_u (allocate.bits)(idx) := 0.U } .otherwise { val provider = s1_update_meta.provider(idx) val decr_mask = Mux(provider.valid, ~MaskLower(UIntToOH(provider.bits)), 0.U) for (i <- 0 until tageNTables) { when (decr_mask(i)) { s1_update_u_mask(i)(idx) := true.B s1_update_u (i)(idx) := 0.U } } } } for (i <- 0 until tageNTables) { for (w <- 0 until bankWidth) { tables(i).io.update_mask(w) := RegNext(s1_update_mask(i)(w)) tables(i).io.update_taken(w) := RegNext(s1_update_taken(i)(w)) tables(i).io.update_alloc(w) := RegNext(s1_update_alloc(i)(w)) tables(i).io.update_old_ctr(w) := RegNext(s1_update_old_ctr(i)(w)) tables(i).io.update_u_mask(w) := RegNext(s1_update_u_mask(i)(w)) tables(i).io.update_u(w) := RegNext(s1_update_u(i)(w)) } tables(i).io.update_pc := RegNext(s1_update.bits.pc) tables(i).io.update_hist := RegNext(s1_update.bits.ghist) } //io.f3_meta := Cat(f3_meta.asUInt, micro.io.f3_meta(micro.metaSz-1,0), base.io.f3_meta(base.metaSz-1, 0)) io.f3_meta := f3_meta.asUInt }
module table_20( // @[tage.scala:91:27] input [7:0] R0_addr, input R0_en, input R0_clk, output [47:0] R0_data, input [7:0] W0_addr, input W0_clk, input [47:0] W0_data, input [3:0] W0_mask ); table_0_ext table_0_ext ( // @[tage.scala:91:27] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (1'h1), // @[tage.scala:91:27] .W0_clk (W0_clk), .W0_data (W0_data), .W0_mask (W0_mask) ); // @[tage.scala:91:27] endmodule
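table_20 is the main SRAM of one TageTable. From the port widths shown (8-bit address, 48-bit read/write data, 4-bit write mask) it holds 256 rows of bankWidth = 4 entries, each tageEntrySz = 1 + tagSz + 3 = 12 bits wide, i.e. tagSz = 8, which is consistent with the (256, 8, 8) and (256, 16, 8) rows of the default tableInfo. The row index and tag are produced by compute_tag_and_hash from the folded global history; a pure-Scala model of that hash, for illustration only (operates on BigInt instead of hardware UInts, and assumes nRows is a power of two as in the default configurations):

object TageHashSketch {
  def foldedHist(hist: BigInt, histLength: Int, l: Int): BigInt = {
    val nChunks = (histLength + l - 1) / l
    (0 until nChunks).map { i =>
      val len = math.min(l, histLength - i * l)
      (hist >> (i * l)) & ((BigInt(1) << len) - 1)
    }.reduce(_ ^ _)
  }

  def indexAndTag(unhashedIdx: BigInt, hist: BigInt,
                  histLength: Int, nRows: Int, tagSz: Int): (BigInt, BigInt) = {
    val idxBits = Integer.numberOfTrailingZeros(nRows) // log2(nRows) for power-of-two nRows
    val idx = (unhashedIdx ^ foldedHist(hist, histLength, idxBits)) & (nRows - 1)
    val tag = ((unhashedIdx >> idxBits) ^ foldedHist(hist, histLength, tagSz)) &
              ((BigInt(1) << tagSz) - 1)
    (idx, tag)
  }
}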
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
module RoundRawFNToRecFN_e8_s24_31( // @[RoundAnyRawFNToRecFN.scala:295:5] input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:299:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:299:16] input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:299:16] input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:299:16] output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:299:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:299:16] ); wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:295:5] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:295:5, :299:16, :310:15] wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5] RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_31 roundAnyRawFNToRecFN ( // @[RoundAnyRawFNToRecFN.scala:310:15] .io_invalidExc (io_invalidExc_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isNaN (io_in_isNaN_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isInf (io_in_isInf_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_isZero (io_in_isZero_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sign (io_in_sign_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sExp (io_in_sExp_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_in_sig (io_in_sig_0), // @[RoundAnyRawFNToRecFN.scala:295:5] .io_out (io_out_0), .io_exceptionFlags (io_exceptionFlags_0) ); // @[RoundAnyRawFNToRecFN.scala:310:15] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:295:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:295:5] endmodule
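The module above only wraps RoundAnyRawFNToRecFN_ie8_is26_oe8_os24: the raw input already carries two extra low-order rounding bits (inSigWidth = sigWidth + 2 = 26, hence the 27-bit io_in_sig, since a RawFloat significand includes one integer bit) and a 10-bit signed exponent. The special exponent encodings that the rounding logic compares against follow directly from the expressions at the top of the file; worked out here for outExpWidth = 8, outSigWidth = 24, as an illustration only:

object RecFNExpConstantsSketch {
  val outExpWidth  = 8
  val outSigWidth  = 24
  val outNaNExp        = BigInt(7) << (outExpWidth - 2)       // 448
  val outInfExp        = BigInt(6) << (outExpWidth - 2)       // 384
  val outMaxFiniteExp  = outInfExp - 1                        // 383
  val outMinNormExp    = (BigInt(1) << (outExpWidth - 1)) + 2 // 130
  val outMinNonzeroExp = outMinNormExp - outSigWidth + 1      // 107
}

These are the values substituted into the expOut masking/mux chain at the end of RoundAnyRawFNToRecFN; a NaN result, for example, forces the 448 pattern into the 9-bit exponent field.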
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_64( // @[AsyncQueue.scala:58:7] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in = 1'h1; // @[ShiftReg.scala:45:23] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_74 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
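AsyncValidSync_64, with its input tied to 1'h1 and an instance named io_out_source_valid_0, is the source_valid_0 stage of the safe-reset handshake in AsyncQueueSource; all of these stages are created when a full AsyncQueue crossing is instantiated. A minimal usage sketch of that crossing, assuming the CrossingIO field names used in the file above and a Bool reset on each side (the exact CrossingIO field types are not shown here, so treat this as illustrative only):

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.{AsyncQueue, AsyncQueueParams}

// Sketch: push 8-bit tokens from this module's clock domain into a separate
// dequeue domain through the default depth-8, 3-stage-sync, safe AsyncQueue.
class CrossingExampleSketch extends Module {
  val io = IO(new Bundle {
    val deq_clock = Input(Clock())
    val deq_reset = Input(Bool())
    val enq = Flipped(Decoupled(UInt(8.W)))
    val deq = Decoupled(UInt(8.W))
  })

  val q = Module(new AsyncQueue(UInt(8.W), AsyncQueueParams()))
  q.io.enq_clock := clock
  q.io.enq_reset := reset.asBool
  q.io.deq_clock := io.deq_clock
  q.io.deq_reset := io.deq_reset
  q.io.enq <> io.enq
  io.deq <> q.io.deq
}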
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
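// Editor's note (worked example, not part of the upstream monitor): `inflight` keeps one
// bit per outstanding source ID, while `inflight_opcodes` keeps a 4-bit slot per source
// ID (a_opcode_bus_size = 4), each slot storing (opcode << 1) | 1 so that an all-zero
// slot means "no request outstanding". Assuming source = 3 and opcode = Get (4):
//   a_opcodes_set_interm = (4 << 1) | 1       = 0b1001
//   a_opcodes_set        = 0b1001 << (3 << 2) = 0b1001 << 12
// and on the D side a_opcode_lookup masks out that slot and shifts right by 1,
// recovering opcode 4 (Get) for source 3. `inflight_sizes` follows the same scheme
// with (size << 1) | 1 stored in power-of-two-wide slots.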
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{ AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry, IdRange, RegionType, TransferSizes } import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions} import freechips.rocketchip.util.{ AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase, CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct } import scala.math.max //These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel case class TLMasterToSlaveTransferSizes( // Supports both Acquire+Release of the following two sizes: acquireT: TransferSizes = TransferSizes.none, acquireB: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none) extends TLCommonTransferSizes { def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .intersect(rhs.acquireT), acquireB = acquireB .intersect(rhs.acquireB), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint)) def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .mincover(rhs.acquireT), acquireB = acquireB .mincover(rhs.acquireB), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint)) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(acquireT, "T"), str(acquireB, "B"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""acquireT = ${acquireT} |acquireB = ${acquireB} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLMasterToSlaveTransferSizes { def unknownEmits = TLMasterToSlaveTransferSizes( acquireT = TransferSizes(1, 
4096), acquireB = TransferSizes(1, 4096), arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096)) def unknownSupports = TLMasterToSlaveTransferSizes() } //These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel case class TLSlaveToMasterTransferSizes( probe: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none ) extends TLCommonTransferSizes { def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .intersect(rhs.probe), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint) ) def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .mincover(rhs.probe), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint) ) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(probe, "P"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""probe = ${probe} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLSlaveToMasterTransferSizes { def unknownEmits = TLSlaveToMasterTransferSizes( arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096), probe = TransferSizes(1, 4096)) def unknownSupports = TLSlaveToMasterTransferSizes() } trait TLCommonTransferSizes { def arithmetic: TransferSizes def logical: TransferSizes def get: TransferSizes def putFull: TransferSizes def putPartial: TransferSizes def hint: TransferSizes } class TLSlaveParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], setName: Option[String], val address: Seq[AddressSet], val regionType: RegionType.T, val executable: Boolean, val fifoId: Option[Int], val supports: TLMasterToSlaveTransferSizes, val emits: TLSlaveToMasterTransferSizes, // By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation) val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData // If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order // Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck // ReleaseAck may NEVER be denied 
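  // Illustrative sketch (not part of the original file): a simple 4 KiB RAM-like
  // slave is normally described through the TLSlaveParameters.v1 factory defined
  // further below, e.g.
  //   TLSlaveParameters.v1(
  //     address            = Seq(AddressSet(0x80000000L, 0xfff)), // hypothetical base/mask
  //     regionType         = RegionType.UNCACHED,
  //     supportsGet        = TransferSizes(1, 8),
  //     supportsPutFull    = TransferSizes(1, 8),
  //     supportsPutPartial = TransferSizes(1, 8),
  //     fifoId             = Some(0))
  // The requires in this class then check internal consistency, e.g. that PutFull
  // covers PutPartial and that an UNCACHED region supports Get.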
extends SimpleProduct { def sortedAddress = address.sorted override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters] override def productPrefix = "TLSlaveParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 11 def productElement(n: Int): Any = n match { case 0 => name case 1 => address case 2 => resources case 3 => regionType case 4 => executable case 5 => fifoId case 6 => supports case 7 => emits case 8 => alwaysGrantsT case 9 => mayDenyGet case 10 => mayDenyPut case _ => throw new IndexOutOfBoundsException(n.toString) } def supportsAcquireT: TransferSizes = supports.acquireT def supportsAcquireB: TransferSizes = supports.acquireB def supportsArithmetic: TransferSizes = supports.arithmetic def supportsLogical: TransferSizes = supports.logical def supportsGet: TransferSizes = supports.get def supportsPutFull: TransferSizes = supports.putFull def supportsPutPartial: TransferSizes = supports.putPartial def supportsHint: TransferSizes = supports.hint require (!address.isEmpty, "Address cannot be empty") address.foreach { a => require (a.finite, "Address must be finite") } address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)") require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)") require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)") require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)") require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)") require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)") require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT") // Make sure that the regionType agrees with the capabilities require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected") val maxTransfer = List( // Largest supported transfer of all types supportsAcquireT.max, supportsAcquireB.max, supportsArithmetic.max, supportsLogical.max, supportsGet.max, supportsPutFull.max, supportsPutPartial.max).max val maxAddress = address.map(_.max).max val minAlignment = address.map(_.alignment).min // The device had better not support a transfer larger than its alignment require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)") def toResource: ResourceAddress = { ResourceAddress(address, ResourcePermissions( r = supportsAcquireB || supportsGet, w = supportsAcquireT || supportsPutFull, x = executable, c = supportsAcquireB, a = supportsArithmetic && supportsLogical)) } def findTreeViolation() = nodePath.find { case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false case _: SinkNode[_, _, _, _, _] => false case node => node.inputs.size != 1 } def isTree = findTreeViolation() == None def infoString = { s"""Slave Name = ${name} |Slave Address = ${address} |supports = ${supports.infoString} | 
|""".stripMargin } def v1copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { new TLSlaveParameters( setName = setName, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = emits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: Option[String] = setName, address: Seq[AddressSet] = address, regionType: RegionType.T = regionType, executable: Boolean = executable, fifoId: Option[Int] = fifoId, supports: TLMasterToSlaveTransferSizes = supports, emits: TLSlaveToMasterTransferSizes = emits, alwaysGrantsT: Boolean = alwaysGrantsT, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } @deprecated("Use v1copy instead of copy","") def copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { v1copy( address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supportsAcquireT = supportsAcquireT, supportsAcquireB = supportsAcquireB, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } } object TLSlaveParameters { def v1( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), 
supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = { new TLSlaveParameters( setName = None, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLSlaveToMasterTransferSizes.unknownEmits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2( address: Seq[AddressSet], nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Seq(), name: Option[String] = None, regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, fifoId: Option[Int] = None, supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports, emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits, alwaysGrantsT: Boolean = false, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } } object TLManagerParameters { @deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","") def apply( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = TLSlaveParameters.v1( address, resources, regionType, executable, nodePath, supportsAcquireT, supportsAcquireB, supportsArithmetic, supportsLogical, supportsGet, supportsPutFull, supportsPutPartial, supportsHint, mayDenyGet, mayDenyPut, alwaysGrantsT, fifoId, ) } case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int]) { def members = Seq(a, b, c, d) members.collect { case Some(beatBytes) => require (isPow2(beatBytes), "Data channel width must be a power of 2") } } object TLChannelBeatBytes{ def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes( Some(beatBytes), Some(beatBytes), Some(beatBytes), Some(beatBytes)) def apply(): TLChannelBeatBytes = TLChannelBeatBytes( None, None, None, None) } class TLSlavePortParameters private( val slaves: Seq[TLSlaveParameters], 
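  // The slave (manager) devices reachable through this port; `sortedSlaves` below orders them by their lowest address set.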
val channelBytes: TLChannelBeatBytes, val endSinkId: Int, val minLatency: Int, val responseFields: Seq[BundleFieldBase], val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct { def sortedSlaves = slaves.sortBy(_.sortedAddress.head) override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters] override def productPrefix = "TLSlavePortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => slaves case 1 => channelBytes case 2 => endSinkId case 3 => minLatency case 4 => responseFields case 5 => requestKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!slaves.isEmpty, "Slave ports must have slaves") require (endSinkId >= 0, "Sink ids cannot be negative") require (minLatency >= 0, "Minimum required latency cannot be negative") // Using this API implies you cannot handle mixed-width busses def beatBytes = { channelBytes.members.foreach { width => require (width.isDefined && width == channelBytes.a) } channelBytes.a.get } // TODO this should be deprecated def managers = slaves def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = { val relevant = slaves.filter(m => policy(m)) relevant.foreach { m => require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ") } } // Bounds on required sizes def maxAddress = slaves.map(_.maxAddress).max def maxTransfer = slaves.map(_.maxTransfer).max def mayDenyGet = slaves.exists(_.mayDenyGet) def mayDenyPut = slaves.exists(_.mayDenyPut) // Diplomatically determined operation sizes emitted by all outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _) // Operation Emitted by at least one outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _) val allSupportAcquireT = allSupportClaims.acquireT val allSupportAcquireB = allSupportClaims.acquireB val allSupportArithmetic = allSupportClaims.arithmetic val allSupportLogical = allSupportClaims.logical val allSupportGet = allSupportClaims.get val allSupportPutFull = allSupportClaims.putFull val allSupportPutPartial = allSupportClaims.putPartial val allSupportHint = allSupportClaims.hint // Operation supported by at least one outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _) val anySupportAcquireT = !anySupportClaims.acquireT.none val anySupportAcquireB = !anySupportClaims.acquireB.none val anySupportArithmetic = !anySupportClaims.arithmetic.none val anySupportLogical = !anySupportClaims.logical.none val anySupportGet = !anySupportClaims.get.none val anySupportPutFull = !anySupportClaims.putFull.none val anySupportPutPartial = !anySupportClaims.putPartial.none val anySupportHint = !anySupportClaims.hint.none // Supporting Acquire means being routable for GrantAck require ((endSinkId == 0) == !anySupportAcquireB) // These return Option[TLSlaveParameters] for your convenience def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address))) // The safe version will check the 
entire address def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _))) // The fast version assumes the address is valid (you probably want fastProperty instead of this function) def findFast(address: UInt) = { val routingMask = AddressDecoder(slaves.map(_.address)) VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _))) } // Compute the simplest AddressSets that decide a key def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = { val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) => k -> vs.flatMap(_._2) } val reductionMask = AddressDecoder(groups.map(_._2)) groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) } } // Select a property def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D = Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) }) // Note: returns the actual fifoId + 1 or 0 if None def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U) def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B) // Does this Port manage this ID/address? def containsSafe(address: UInt) = findSafe(address).reduce(_ || _) private def addressHelper( // setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity safe: Boolean, // member filters out the sizes being checked based on the opcode being emitted or supported member: TLSlaveParameters => TransferSizes, address: UInt, lgSize: UInt, // range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity range: Option[TransferSizes]): Bool = { // trim reduces circuit complexity by intersecting checked sizes with the range argument def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x) // groupBy returns an unordered map, convert back to Seq and sort the result for determinism // groupByIntoSeq is turning slaves into trimmed membership sizes // We are grouping all the slaves by their transfer size where // if they support the trimmed size then // member is the type of transfer that you are looking for (What you are trying to filter on) // When you consider membership, you are trimming the sizes to only the ones that you care about // you are filtering the slaves based on both whether they support a particular opcode and the size // Grouping the slaves based on the actual transfer size range they support // intersecting the range and checking their membership // FOR SUPPORTCASES instead of returning the list of slaves, // you are returning a map from transfer size to the set of // address sets that are supported for that transfer size // find all the slaves that support a certain type of operation and then group their addresses by the supported size // for every size there could be multiple address ranges // safety is a trade off between checking between all possible addresses vs only the addresses // that are known to have supported sizes // the trade off is 'checking all addresses is a more expensive circuit but will always give you // the right answer even if you give it an illegal address' // the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer // fast presumes address legality // This 
groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies. // In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size. val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) => k -> vs.flatMap(_.address) } // safe produces a circuit that compares against all possible addresses, // whereas fast presumes that the address is legal but uses an efficient address decoder val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2)) // Simplified creates the most concise possible representation of each cases' address sets based on the mask. val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) } simplified.map { case (s, a) => // s is a size, you are checking for this size either the size of the operation is in s // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire. ((Some(s) == range).B || s.containsLg(lgSize)) && a.map(_.contains(address)).reduce(_||_) }.foldLeft(false.B)(_||_) } def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range) def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range) def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range) def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range) def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range) def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range) def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range) def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range) def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range) def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range) def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range) def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range) def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range) def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range) def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, 
_.supports.putPartial, address, lgSize, range) def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range) def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range) def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range) def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range) def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range) def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range) def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range) def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range) def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption def isTree = !slaves.exists(!_.isTree) def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString def v1copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = managers, channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } def v2copy( slaves: Seq[TLSlaveParameters] = slaves, channelBytes: TLChannelBeatBytes = channelBytes, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = slaves, channelBytes = channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } @deprecated("Use v1copy instead of copy","") def copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { v1copy( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } object TLSlavePortParameters { def v1( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { new TLSlavePortParameters( slaves = managers, channelBytes = TLChannelBeatBytes(beatBytes), endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } } object TLManagerPortParameters { @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","") def apply( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { 
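    // This legacy constructor simply forwards to TLSlavePortParameters.v1.
    // Illustrative direct usage (hypothetical `ram` slave, not from this file):
    //   TLSlavePortParameters.v1(managers = Seq(ram), beatBytes = 8)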
TLSlavePortParameters.v1( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } class TLMasterParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], val name: String, val visibility: Seq[AddressSet], val unusedRegionTypes: Set[RegionType.T], val executesOnly: Boolean, val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C. val supports: TLSlaveToMasterTransferSizes, val emits: TLMasterToSlaveTransferSizes, val neverReleasesData: Boolean, val sourceId: IdRange) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters] override def productPrefix = "TLMasterParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 10 def productElement(n: Int): Any = n match { case 0 => name case 1 => sourceId case 2 => resources case 3 => visibility case 4 => unusedRegionTypes case 5 => executesOnly case 6 => requestFifo case 7 => supports case 8 => emits case 9 => neverReleasesData case _ => throw new IndexOutOfBoundsException(n.toString) } require (!sourceId.isEmpty) require (!visibility.isEmpty) require (supports.putFull.contains(supports.putPartial)) // We only support these operations if we support Probe (ie: we're a cache) require (supports.probe.contains(supports.arithmetic)) require (supports.probe.contains(supports.logical)) require (supports.probe.contains(supports.get)) require (supports.probe.contains(supports.putFull)) require (supports.probe.contains(supports.putPartial)) require (supports.probe.contains(supports.hint)) visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } val maxTransfer = List( supports.probe.max, supports.arithmetic.max, supports.logical.max, supports.get.max, supports.putFull.max, supports.putPartial.max).max def infoString = { s"""Master Name = ${name} |visibility = ${visibility} |emits = ${emits.infoString} |sourceId = ${sourceId} | |""".stripMargin } def v1copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { new TLMasterParameters( nodePath = nodePath, resources = this.resources, name = name, visibility = visibility, unusedRegionTypes = this.unusedRegionTypes, executesOnly = this.executesOnly, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = this.emits, neverReleasesData = this.neverReleasesData, sourceId = sourceId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: String = name, visibility: Seq[AddressSet] = visibility, unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes, executesOnly: Boolean = executesOnly, requestFifo: Boolean = requestFifo, supports: TLSlaveToMasterTransferSizes = supports, emits: TLMasterToSlaveTransferSizes = emits, neverReleasesData: Boolean = neverReleasesData, sourceId: IdRange = sourceId) = { new 
TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } @deprecated("Use v1copy instead of copy","") def copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { v1copy( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } object TLMasterParameters { def v1( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { new TLMasterParameters( nodePath = nodePath, resources = Nil, name = name, visibility = visibility, unusedRegionTypes = Set(), executesOnly = false, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData = false, sourceId = sourceId) } def v2( nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Nil, name: String, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), unusedRegionTypes: Set[RegionType.T] = Set(), executesOnly: Boolean = false, requestFifo: Boolean = false, supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports, emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData: Boolean = false, sourceId: IdRange = IdRange(0,1)) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } } object TLClientParameters { @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","") def apply( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet.everything), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: 
TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { TLMasterParameters.v1( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } class TLMasterPortParameters private( val masters: Seq[TLMasterParameters], val channelBytes: TLChannelBeatBytes, val minLatency: Int, val echoFields: Seq[BundleFieldBase], val requestFields: Seq[BundleFieldBase], val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters] override def productPrefix = "TLMasterPortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => masters case 1 => channelBytes case 2 => minLatency case 3 => echoFields case 4 => requestFields case 5 => responseKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!masters.isEmpty) require (minLatency >= 0) def clients = masters // Require disjoint ranges for Ids IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) => require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}") } // Bounds on required sizes def endSourceId = masters.map(_.sourceId.end).max def maxTransfer = masters.map(_.maxTransfer).max // The unused sources < endSourceId def unusedSources: Seq[Int] = { val usedSources = masters.map(_.sourceId).sortBy(_.start) ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) => end until start } } // Diplomatically determined operation sizes emitted by all inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = masters.map(_.emits).reduce( _ intersect _) // Diplomatically determined operation sizes Emitted by at least one inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all inward Masters // as opposed to supports* which generate circuitry to check which specific addresses val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _) val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _) val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _) val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _) val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _) val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _) val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _) // Diplomatically determined operation sizes supported by at least one master // as opposed to supports* which generate circuitry to check which specific addresses val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _) val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _) val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _) val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _) val anySupportPutFull = 
masters.map(!_.supports.putFull.none) .reduce(_ || _) val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _) val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _) // These return Option[TLMasterParameters] for your convenience def find(id: Int) = masters.find(_.sourceId.contains(id)) // Synthesizable lookup methods def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id))) def contains(id: UInt) = find(id).reduce(_ || _) def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B)) // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = { val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _) // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version; // the case where there is only one group. if (allSame) member(masters(0)).containsLg(lgSize) else { // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize Mux1H(find(id), masters.map(member(_).containsLg(lgSize))) } } // Check for support of a given operation at a specific id val supportsProbe = sourceIdHelper(_.supports.probe) _ val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _ val supportsLogical = sourceIdHelper(_.supports.logical) _ val supportsGet = sourceIdHelper(_.supports.get) _ val supportsPutFull = sourceIdHelper(_.supports.putFull) _ val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _ val supportsHint = sourceIdHelper(_.supports.hint) _ // TODO: Merge sourceIdHelper2 with sourceIdHelper private def sourceIdHelper2( member: TLMasterParameters => TransferSizes, sourceId: UInt, lgSize: UInt): Bool = { // Because sourceIds are uniquely owned by each master, we use them to group the // cases that have to be checked. 
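    // For example (illustrative only): with one master emitting 1-8 byte Gets on
    // sourceIds [0,4) and another emitting 1-64 byte Gets on sourceIds [4,8),
    // emitCases for `_.emits.get` becomes
    //   Seq(TransferSizes(1, 8) -> Seq(IdRange(0, 4)), TransferSizes(1, 64) -> Seq(IdRange(4, 8)))
    // and the expression below ORs together "size fits && sourceId in range" over these cases.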
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) => k -> vs.map(_.sourceId) } emitCases.map { case (s, a) => (s.containsLg(lgSize)) && a.map(_.contains(sourceId)).reduce(_||_) }.foldLeft(false.B)(_||_) } // Check for emit of a given operation at a specific id def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize) def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize) def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize) def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize) def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize) def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize) def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize) def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize) def infoString = masters.map(_.infoString).mkString def v1copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = clients, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2copy( masters: Seq[TLMasterParameters] = masters, channelBytes: TLChannelBeatBytes = channelBytes, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } @deprecated("Use v1copy instead of copy","") def copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { v1copy( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLClientPortParameters { @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","") def apply( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { TLMasterPortParameters.v1( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLMasterPortParameters { def v1( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = clients, channelBytes = TLChannelBeatBytes(), minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2( masters: Seq[TLMasterParameters], channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(), minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, 
minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } } case class TLBundleParameters( addressBits: Int, dataBits: Int, sourceBits: Int, sinkBits: Int, sizeBits: Int, echoFields: Seq[BundleFieldBase], requestFields: Seq[BundleFieldBase], responseFields: Seq[BundleFieldBase], hasBCE: Boolean) { // Chisel has issues with 0-width wires require (addressBits >= 1) require (dataBits >= 8) require (sourceBits >= 1) require (sinkBits >= 1) require (sizeBits >= 1) require (isPow2(dataBits)) echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") } val addrLoBits = log2Up(dataBits/8) // Used to uniquify bus IP names def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u") def union(x: TLBundleParameters) = TLBundleParameters( max(addressBits, x.addressBits), max(dataBits, x.dataBits), max(sourceBits, x.sourceBits), max(sinkBits, x.sinkBits), max(sizeBits, x.sizeBits), echoFields = BundleField.union(echoFields ++ x.echoFields), requestFields = BundleField.union(requestFields ++ x.requestFields), responseFields = BundleField.union(responseFields ++ x.responseFields), hasBCE || x.hasBCE) } object TLBundleParameters { val emptyBundleParams = TLBundleParameters( addressBits = 1, dataBits = 8, sourceBits = 1, sinkBits = 1, sizeBits = 1, echoFields = Nil, requestFields = Nil, responseFields = Nil, hasBCE = false) def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y)) def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) = new TLBundleParameters( addressBits = log2Up(slave.maxAddress + 1), dataBits = slave.beatBytes * 8, sourceBits = log2Up(master.endSourceId), sinkBits = log2Up(slave.endSinkId), sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1), echoFields = master.echoFields, requestFields = BundleField.accept(master.requestFields, slave.requestKeys), responseFields = BundleField.accept(slave.responseFields, master.responseKeys), hasBCE = master.anySupportProbe && slave.anySupportAcquireB) } case class TLEdgeParameters( master: TLMasterPortParameters, slave: TLSlavePortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { // legacy names: def manager = slave def client = master val maxTransfer = max(master.maxTransfer, slave.maxTransfer) val maxLgSize = log2Ceil(maxTransfer) // Sanity check the link... 
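  // Illustrative numbers (assumed for the example, not from the original source): with a
  // slave advertising beatBytes = 8 and a 64-byte maximum transfer, maxTransfer is 64 and
  // maxLgSize is 6, so the link check below holds. The derived TLBundleParameters then
  // carry sizeBits = log2Up(log2Ceil(64) + 1) = 3, enough to encode any legal lgSize
  // from 0 to 6.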
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})") def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims) val bundle = TLBundleParameters(master, slave) def formatEdge = master.infoString + "\n" + slave.infoString } case class TLCreditedDelay( a: CreditedDelay, b: CreditedDelay, c: CreditedDelay, d: CreditedDelay, e: CreditedDelay) { def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay( a = a + that.a, b = b + that.b, c = c + that.c, d = d + that.d, e = e + that.e) override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})" } object TLCreditedDelay { def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay) } case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString} case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val delay = client.delay + manager.delay val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters) case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base)) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } // To be unified, devices must agree on all of these terms case class ManagerUnificationKey( resources: Seq[Resource], regionType: RegionType.T, executable: Boolean, supportsAcquireT: TransferSizes, supportsAcquireB: TransferSizes, supportsArithmetic: TransferSizes, supportsLogical: TransferSizes, supportsGet: TransferSizes, supportsPutFull: TransferSizes, supportsPutPartial: TransferSizes, supportsHint: TransferSizes) object ManagerUnificationKey { def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey( resources = x.resources, regionType = x.regionType, executable = x.executable, supportsAcquireT = x.supportsAcquireT, supportsAcquireB = x.supportsAcquireB, supportsArithmetic = x.supportsArithmetic, supportsLogical = x.supportsLogical, supportsGet = x.supportsGet, supportsPutFull = x.supportsPutFull, supportsPutPartial = x.supportsPutPartial, supportsHint = x.supportsHint) } object 
ManagerUnification { def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = { slaves.groupBy(ManagerUnificationKey.apply).values.map { seq => val agree = seq.forall(_.fifoId == seq.head.fifoId) seq(0).v1copy( address = AddressSet.unify(seq.flatMap(_.address)), fifoId = if (agree) seq(0).fifoId else None) }.toList } } case class TLBufferParams( a: BufferParams = BufferParams.none, b: BufferParams = BufferParams.none, c: BufferParams = BufferParams.none, d: BufferParams = BufferParams.none, e: BufferParams = BufferParams.none ) extends DirectedBuffers[TLBufferParams] { def copyIn(x: BufferParams) = this.copy(b = x, d = x) def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x) def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x) } /** Pretty printing of TL source id maps */ class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] { private val tlDigits = String.valueOf(tl.endSourceId-1).length() protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s" private val sorted = tl.masters.sortBy(_.sourceId) val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c => TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo) } } case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean) extends IdMapEntry { val from = tlId val to = tlId val maxTransactionsInFlight = Some(tlId.size) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? 
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => 
a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
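  // Worked examples (illustrative, following the MuxLookup table below): a Get or a
  // PREFETCH_READ Hint never needs write (T) permissions, every Put/Arithmetic/Logical
  // message does, and an AcquireBlock or AcquirePerm needs T exactly when its grow
  // parameter is NtoT or BtoT (an NtoB acquire only asks for a read-only Branch copy).
  // For instance, with `addr` as a placeholder address:
  //   edge.needT(edge.Get(0.U, addr, 3.U)._2)                                    // false
  //   edge.needT(edge.AcquireBlock(0.U, addr, 6.U, TLPermissions.NtoT)._2)       // true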
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
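// Illustrative usage sketch, kept commented out and not part of the original file: how a
// client LazyModule might drive the TLEdgeOut request constructors defined above. The
// class name, `node`, the 0x1000 address, and the single-entry source-id range are
// placeholders chosen only for this example; the diplomacy plumbing follows the usual
// rocket-chip TLClientNode pattern.
//
// class GetExample(implicit p: Parameters) extends LazyModule {
//   val node = TLClientNode(Seq(TLMasterPortParameters.v1(
//     Seq(TLMasterParameters.v1(name = "getExample", sourceId = IdRange(0, 1))))))
//   lazy val module = new LazyModuleImp(this) {
//     val (tl, edge) = node.out(0)
//     // Issue a single 8-byte Get to a fixed address; `legal` reflects whether some
//     // visible manager actually supports Gets of this size at this address.
//     val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
//     tl.a.valid := legal
//     tl.a.bits  := get
//     tl.d.ready := true.B   // sink the AccessAckData response unconditionally
//   }
// }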
module TLMonitor_83( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [4:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [4:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [4:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [4:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10] wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire 
_c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 
1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27] wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25] wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_4 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:56:48] wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31] wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_10 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:56:48] wire _source_ok_WIRE_1_0 = 1'h1; // @[Parameters.scala:1138:31] wire sink_ok = 1'h1; // @[Monitor.scala:309:31] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28] wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] 
wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] c_set = 32'h0; // @[Monitor.scala:738:34] wire [31:0] c_set_wo_ready = 32'h0; // @[Monitor.scala:739:34] wire [31:0] _c_set_wo_ready_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_address = 32'h0; // 
@[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [4:0] _c_first_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_first_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_first_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_first_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_set_wo_ready_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_set_wo_ready_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_opcodes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_opcodes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_sizes_set_interm_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_sizes_set_interm_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [4:0] _c_opcodes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_opcodes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_sizes_set_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_sizes_set_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_probe_ack_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_probe_ack_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _c_probe_ack_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _c_probe_ack_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _same_cycle_resp_WIRE_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _same_cycle_resp_WIRE_1_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _same_cycle_resp_WIRE_2_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _same_cycle_resp_WIRE_3_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [4:0] _same_cycle_resp_WIRE_4_bits_source = 5'h0; // @[Bundles.scala:265:74] wire [4:0] _same_cycle_resp_WIRE_5_bits_source = 5'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] 
_c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 
3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // 
@[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [259:0] _c_sizes_set_T_1 = 260'h0; // @[Monitor.scala:768:52] wire [7:0] _c_opcodes_set_T = 8'h0; // @[Monitor.scala:767:79] wire [7:0] _c_sizes_set_T = 8'h0; // @[Monitor.scala:768:77] wire [258:0] _c_opcodes_set_T_1 = 259'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [31:0] _c_set_wo_ready_T = 32'h1; // @[OneHot.scala:58:35] wire [31:0] _c_set_T = 32'h1; // @[OneHot.scala:58:35] wire [255:0] c_sizes_set = 256'h0; // @[Monitor.scala:741:34] wire [127:0] c_opcodes_set = 128'h0; // @[Monitor.scala:740:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [4:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [4:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [26:0] _GEN = 27'hFFF << io_in_a_bits_size_0; // @[package.scala:243:71] wire [26:0] _is_aligned_mask_T; // @[package.scala:243:71] assign 
_is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [11:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [31:0] _is_aligned_T = {20'h0, io_in_a_bits_address_0[11:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 32'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 4'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] 
wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [4:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}] wire [4:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire _T_1257 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1257; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1257; // @[Decoupled.scala:51:35] wire [11:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_2 = 
~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [4:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _T_1330 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1330; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1330; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1330; // @[Decoupled.scala:51:35] wire [26:0] _GEN_0 = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [8:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [4:0] source_1; // @[Monitor.scala:541:22] reg [2:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [31:0] inflight; // @[Monitor.scala:614:27] reg [127:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [255:0] inflight_sizes; // @[Monitor.scala:618:33] wire [11:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [8:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [31:0] a_set; // @[Monitor.scala:626:34] wire [31:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [127:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [255:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [7:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [7:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [7:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [7:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [7:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [127:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [127:0] _a_opcode_lookup_T_6 = {124'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [127:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[127:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [7:0] _GEN_2 = {io_in_d_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :641:65] wire [7:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65] wire [7:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_2; // @[Monitor.scala:641:65, :681:99] wire [7:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_2; // @[Monitor.scala:641:65, :750:67] wire [7:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_2; // @[Monitor.scala:641:65, :791:99] wire [255:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [255:0] _a_size_lookup_T_6 = {248'h0, _a_size_lookup_T_1[7:0]}; // @[Monitor.scala:641:{40,91}] wire [255:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[255:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = 
io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [31:0] _GEN_3 = {27'h0, io_in_a_bits_source_0}; // @[OneHot.scala:58:35] wire [31:0] _GEN_4 = 32'h1 << _GEN_3; // @[OneHot.scala:58:35] wire [31:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_4; // @[OneHot.scala:58:35] wire [31:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_4; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T : 32'h0; // @[OneHot.scala:58:35] wire _T_1183 = _T_1257 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1183 ? _a_set_T : 32'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1183 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [4:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [4:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[4:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1183 ? _a_sizes_set_interm_T_1 : 5'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [7:0] _a_opcodes_set_T = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [258:0] _a_opcodes_set_T_1 = {255'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1183 ? _a_opcodes_set_T_1[127:0] : 128'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [7:0] _a_sizes_set_T = {io_in_a_bits_source_0, 3'h0}; // @[Monitor.scala:36:7, :660:77] wire [259:0] _a_sizes_set_T_1 = {255'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1183 ? _a_sizes_set_T_1[255:0] : 256'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [31:0] d_clr; // @[Monitor.scala:664:34] wire [31:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [127:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [255:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_5 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_5; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_5; // @[Monitor.scala:673:46, :783:46] wire _T_1229 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [31:0] _GEN_6 = {27'h0, io_in_d_bits_source_0}; // @[OneHot.scala:58:35] wire [31:0] _GEN_7 = 32'h1 << _GEN_6; // @[OneHot.scala:58:35] wire [31:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_7; // @[OneHot.scala:58:35] wire [31:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_7; // @[OneHot.scala:58:35] wire [31:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_7; // @[OneHot.scala:58:35] wire [31:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_7; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1229 & ~d_release_ack ? _d_clr_wo_ready_T : 32'h0; // @[OneHot.scala:58:35] wire _T_1198 = _T_1330 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1198 ? 
_d_clr_T : 32'h0; // @[OneHot.scala:58:35] wire [270:0] _d_opcodes_clr_T_5 = 271'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1198 ? _d_opcodes_clr_T_5[127:0] : 128'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [270:0] _d_sizes_clr_T_5 = 271'hFF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1198 ? _d_sizes_clr_T_5[255:0] : 256'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [31:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [31:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [31:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [127:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [127:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [127:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [255:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [255:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [255:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [31:0] inflight_1; // @[Monitor.scala:726:35] wire [31:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [127:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [127:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [255:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [255:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [127:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [127:0] _c_opcode_lookup_T_6 = {124'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [127:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[127:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [255:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [255:0] _c_size_lookup_T_6 = {248'h0, _c_size_lookup_T_1[7:0]}; // @[Monitor.scala:750:{42,93}] wire [255:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[255:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [31:0] d_clr_1; // @[Monitor.scala:774:34] wire [31:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [127:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [255:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1301 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1301 & d_release_ack_1 ? _d_clr_wo_ready_T_1 : 32'h0; // @[OneHot.scala:58:35] wire _T_1283 = _T_1330 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1283 ? _d_clr_T_1 : 32'h0; // @[OneHot.scala:58:35] wire [270:0] _d_opcodes_clr_T_11 = 271'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1283 ? _d_opcodes_clr_T_11[127:0] : 128'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [270:0] _d_sizes_clr_T_11 = 271'hFF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1283 ? 
_d_sizes_clr_T_11[255:0] : 256'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 5'h0; // @[Monitor.scala:36:7, :795:113] wire [31:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [31:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [127:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [127:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [255:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [255:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
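The crossing above relies on Gray-coded read/write indices: GrayCounter returns incremented ^ (incremented >> 1), so successive pointer values differ in exactly one bit and remain safe to sample through the multi-stage synchronizers even when a clock edge falls mid-transition. The following is a minimal, self-contained sketch of that property in plain Scala; it is illustrative only, and GrayDemo/toGray are not part of the files listed here.

// Illustrative check of the binary -> Gray mapping used by GrayCounter:
// adjacent Gray codes differ in exactly one bit, which is what makes the
// widx/ridx pointers safe to synchronize across the clock-domain crossing.
object GrayDemo extends App {
  def toGray(n: Int): Int = n ^ (n >> 1)
  val grays = (0 until 8).map(toGray) // 0, 1, 3, 2, 6, 7, 5, 4
  println(grays.mkString(", "))
  assert(grays.sliding(2).forall { case Seq(a, b) => Integer.bitCount(a ^ b) == 1 })
}

Only the pointers cross domains this way; the data entries are read after the synchronized pointer comparison shows they are stable, which is why the mem register in AsyncQueueSource needs no reset.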
module AsyncValidSync_199( // @[AsyncQueue.scala:58:7]
  input  io_in,  // @[AsyncQueue.scala:59:14]
  output io_out, // @[AsyncQueue.scala:59:14]
  input  clock,  // @[AsyncQueue.scala:63:17]
  input  reset   // @[AsyncQueue.scala:64:17]
);

  wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7]
  wire _io_out_WIRE; // @[ShiftReg.scala:48:24]
  wire io_out_0; // @[AsyncQueue.scala:58:7]
  assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24]
  AsyncResetSynchronizerShiftReg_w1_d3_i0_216 io_out_source_valid ( // @[ShiftReg.scala:45:23]
    .clock (clock),
    .reset (reset),
    .io_d  (io_in_0), // @[AsyncQueue.scala:58:7]
    .io_q  (_io_out_WIRE)
  ); // @[ShiftReg.scala:45:23]
  assign io_out = io_out_0; // @[AsyncQueue.scala:58:7]
endmodule
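AsyncValidSync_199 above simply routes io_in through a three-deep, asynchronously reset synchronizer instance (AsyncResetSynchronizerShiftReg_w1_d3_i0_216, whose body is not shown in this listing). Assuming that instance behaves like the w=1, depth=3, init=0 chain described in ShiftReg.scala, an equivalent behavioral Chisel sketch would be (names here are illustrative, not part of the generated design):

import chisel3._

// Assumed-equivalent behaviour of a 1-bit, 3-deep, async-reset synchronizer chain.
// The generated design keeps this as a dedicated module instance so backend flows
// can constrain or replace it for CDC purposes, as noted in ShiftReg.scala.
class ValidSyncSketch extends Module {
  val io = IO(new Bundle {
    val in  = Input(Bool())
    val out = Output(Bool())
  })
  withReset(reset.asAsyncReset) {
    val chain = RegInit(VecInit(Seq.fill(3)(false.B))) // every stage resets to init = 0
    chain(0) := io.in
    for (i <- 1 until 3) { chain(i) := chain(i - 1) }
    io.out := chain(2)
  }
}

In the emitted Verilog this corresponds to the io_out_source_valid instance driving _io_out_WIRE, which is forwarded to io_out.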
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
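  // Illustrative note (added comment, not part of the original monitor code): the tracking
  // registers above pack one (opcode, size) record per source ID, each stored as
  // (value << 1) | 1 so that an all-zero field unambiguously means "nothing in flight".
  // For example, with a_opcode_bus_size = 4, a Get (opcode 4) issued from source 2 is recorded
  // as a_opcodes_set_interm = (4 << 1) | 1 = 9, shifted left by (2 << 2) = 8 bits into
  // inflight_opcodes; a_opcode_lookup reverses this by shifting right by the 'D' source index,
  // masking, and dropping the low "set" bit to recover opcode 4.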
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
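      // Sketch of the technique used below (comment added for clarity, not in the original
      // source): the dynamic rotate is a log-depth barrel shifter. The amount 'n' is padded
      // to log2Ceil(width) bits and each bit i selects a fixed rotate by 2^i, so rotating an
      // 8-bit value by n = 5 = 0b101 applies the rotate-by-1 and rotate-by-4 stages and skips
      // rotate-by-2, costing log2(width) mux levels rather than a width-sized mux.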
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{ AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry, IdRange, RegionType, TransferSizes } import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions} import freechips.rocketchip.util.{ AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase, CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct } import scala.math.max //These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel case class TLMasterToSlaveTransferSizes( // Supports both Acquire+Release of the following two sizes: acquireT: TransferSizes = TransferSizes.none, acquireB: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none) extends TLCommonTransferSizes { def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .intersect(rhs.acquireT), acquireB = acquireB .intersect(rhs.acquireB), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint)) def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .mincover(rhs.acquireT), acquireB = acquireB .mincover(rhs.acquireB), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint)) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(acquireT, "T"), str(acquireB, "B"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""acquireT = ${acquireT} |acquireB = ${acquireB} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLMasterToSlaveTransferSizes { def unknownEmits = TLMasterToSlaveTransferSizes( acquireT = TransferSizes(1, 
4096), acquireB = TransferSizes(1, 4096), arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096)) def unknownSupports = TLMasterToSlaveTransferSizes() } //These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel case class TLSlaveToMasterTransferSizes( probe: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none ) extends TLCommonTransferSizes { def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .intersect(rhs.probe), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint) ) def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .mincover(rhs.probe), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint) ) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(probe, "P"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""probe = ${probe} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLSlaveToMasterTransferSizes { def unknownEmits = TLSlaveToMasterTransferSizes( arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096), probe = TransferSizes(1, 4096)) def unknownSupports = TLSlaveToMasterTransferSizes() } trait TLCommonTransferSizes { def arithmetic: TransferSizes def logical: TransferSizes def get: TransferSizes def putFull: TransferSizes def putPartial: TransferSizes def hint: TransferSizes } class TLSlaveParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], setName: Option[String], val address: Seq[AddressSet], val regionType: RegionType.T, val executable: Boolean, val fifoId: Option[Int], val supports: TLMasterToSlaveTransferSizes, val emits: TLSlaveToMasterTransferSizes, // By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation) val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData // If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order // Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck // ReleaseAck may NEVER be denied 
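// Note (added annotation, not in the original source): this constructor is private;
// instances are built through the TLSlaveParameters.v1 / v2 factory methods defined
// later in this file, which fill in the supports/emits bundles from individual fields.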
extends SimpleProduct { def sortedAddress = address.sorted override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters] override def productPrefix = "TLSlaveParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 11 def productElement(n: Int): Any = n match { case 0 => name case 1 => address case 2 => resources case 3 => regionType case 4 => executable case 5 => fifoId case 6 => supports case 7 => emits case 8 => alwaysGrantsT case 9 => mayDenyGet case 10 => mayDenyPut case _ => throw new IndexOutOfBoundsException(n.toString) } def supportsAcquireT: TransferSizes = supports.acquireT def supportsAcquireB: TransferSizes = supports.acquireB def supportsArithmetic: TransferSizes = supports.arithmetic def supportsLogical: TransferSizes = supports.logical def supportsGet: TransferSizes = supports.get def supportsPutFull: TransferSizes = supports.putFull def supportsPutPartial: TransferSizes = supports.putPartial def supportsHint: TransferSizes = supports.hint require (!address.isEmpty, "Address cannot be empty") address.foreach { a => require (a.finite, "Address must be finite") } address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)") require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)") require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)") require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)") require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)") require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)") require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT") // Make sure that the regionType agrees with the capabilities require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected") val maxTransfer = List( // Largest supported transfer of all types supportsAcquireT.max, supportsAcquireB.max, supportsArithmetic.max, supportsLogical.max, supportsGet.max, supportsPutFull.max, supportsPutPartial.max).max val maxAddress = address.map(_.max).max val minAlignment = address.map(_.alignment).min // The device had better not support a transfer larger than its alignment require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)") def toResource: ResourceAddress = { ResourceAddress(address, ResourcePermissions( r = supportsAcquireB || supportsGet, w = supportsAcquireT || supportsPutFull, x = executable, c = supportsAcquireB, a = supportsArithmetic && supportsLogical)) } def findTreeViolation() = nodePath.find { case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false case _: SinkNode[_, _, _, _, _] => false case node => node.inputs.size != 1 } def isTree = findTreeViolation() == None def infoString = { s"""Slave Name = ${name} |Slave Address = ${address} |supports = ${supports.infoString} | 
|""".stripMargin } def v1copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { new TLSlaveParameters( setName = setName, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = emits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: Option[String] = setName, address: Seq[AddressSet] = address, regionType: RegionType.T = regionType, executable: Boolean = executable, fifoId: Option[Int] = fifoId, supports: TLMasterToSlaveTransferSizes = supports, emits: TLSlaveToMasterTransferSizes = emits, alwaysGrantsT: Boolean = alwaysGrantsT, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } @deprecated("Use v1copy instead of copy","") def copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { v1copy( address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supportsAcquireT = supportsAcquireT, supportsAcquireB = supportsAcquireB, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } } object TLSlaveParameters { def v1( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), 
supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = { new TLSlaveParameters( setName = None, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLSlaveToMasterTransferSizes.unknownEmits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2( address: Seq[AddressSet], nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Seq(), name: Option[String] = None, regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, fifoId: Option[Int] = None, supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports, emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits, alwaysGrantsT: Boolean = false, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } } object TLManagerParameters { @deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","") def apply( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = TLSlaveParameters.v1( address, resources, regionType, executable, nodePath, supportsAcquireT, supportsAcquireB, supportsArithmetic, supportsLogical, supportsGet, supportsPutFull, supportsPutPartial, supportsHint, mayDenyGet, mayDenyPut, alwaysGrantsT, fifoId, ) } case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int]) { def members = Seq(a, b, c, d) members.collect { case Some(beatBytes) => require (isPow2(beatBytes), "Data channel width must be a power of 2") } } object TLChannelBeatBytes{ def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes( Some(beatBytes), Some(beatBytes), Some(beatBytes), Some(beatBytes)) def apply(): TLChannelBeatBytes = TLChannelBeatBytes( None, None, None, None) } class TLSlavePortParameters private( val slaves: Seq[TLSlaveParameters], 
val channelBytes: TLChannelBeatBytes, val endSinkId: Int, val minLatency: Int, val responseFields: Seq[BundleFieldBase], val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct { def sortedSlaves = slaves.sortBy(_.sortedAddress.head) override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters] override def productPrefix = "TLSlavePortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => slaves case 1 => channelBytes case 2 => endSinkId case 3 => minLatency case 4 => responseFields case 5 => requestKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!slaves.isEmpty, "Slave ports must have slaves") require (endSinkId >= 0, "Sink ids cannot be negative") require (minLatency >= 0, "Minimum required latency cannot be negative") // Using this API implies you cannot handle mixed-width busses def beatBytes = { channelBytes.members.foreach { width => require (width.isDefined && width == channelBytes.a) } channelBytes.a.get } // TODO this should be deprecated def managers = slaves def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = { val relevant = slaves.filter(m => policy(m)) relevant.foreach { m => require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ") } } // Bounds on required sizes def maxAddress = slaves.map(_.maxAddress).max def maxTransfer = slaves.map(_.maxTransfer).max def mayDenyGet = slaves.exists(_.mayDenyGet) def mayDenyPut = slaves.exists(_.mayDenyPut) // Diplomatically determined operation sizes emitted by all outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _) // Operation Emitted by at least one outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _) val allSupportAcquireT = allSupportClaims.acquireT val allSupportAcquireB = allSupportClaims.acquireB val allSupportArithmetic = allSupportClaims.arithmetic val allSupportLogical = allSupportClaims.logical val allSupportGet = allSupportClaims.get val allSupportPutFull = allSupportClaims.putFull val allSupportPutPartial = allSupportClaims.putPartial val allSupportHint = allSupportClaims.hint // Operation supported by at least one outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _) val anySupportAcquireT = !anySupportClaims.acquireT.none val anySupportAcquireB = !anySupportClaims.acquireB.none val anySupportArithmetic = !anySupportClaims.arithmetic.none val anySupportLogical = !anySupportClaims.logical.none val anySupportGet = !anySupportClaims.get.none val anySupportPutFull = !anySupportClaims.putFull.none val anySupportPutPartial = !anySupportClaims.putPartial.none val anySupportHint = !anySupportClaims.hint.none // Supporting Acquire means being routable for GrantAck require ((endSinkId == 0) == !anySupportAcquireB) // These return Option[TLSlaveParameters] for your convenience def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address))) // The safe version will check the 
entire address def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _))) // The fast version assumes the address is valid (you probably want fastProperty instead of this function) def findFast(address: UInt) = { val routingMask = AddressDecoder(slaves.map(_.address)) VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _))) } // Compute the simplest AddressSets that decide a key def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = { val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) => k -> vs.flatMap(_._2) } val reductionMask = AddressDecoder(groups.map(_._2)) groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) } } // Select a property def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D = Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) }) // Note: returns the actual fifoId + 1 or 0 if None def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U) def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B) // Does this Port manage this ID/address? def containsSafe(address: UInt) = findSafe(address).reduce(_ || _) private def addressHelper( // setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity safe: Boolean, // member filters out the sizes being checked based on the opcode being emitted or supported member: TLSlaveParameters => TransferSizes, address: UInt, lgSize: UInt, // range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity range: Option[TransferSizes]): Bool = { // trim reduces circuit complexity by intersecting checked sizes with the range argument def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x) // groupBy returns an unordered map, convert back to Seq and sort the result for determinism // groupByIntoSeq is turning slaves into trimmed membership sizes // We are grouping all the slaves by their transfer size where // if they support the trimmed size then // member is the type of transfer that you are looking for (What you are trying to filter on) // When you consider membership, you are trimming the sizes to only the ones that you care about // you are filtering the slaves based on both whether they support a particular opcode and the size // Grouping the slaves based on the actual transfer size range they support // intersecting the range and checking their membership // FOR SUPPORTCASES instead of returning the list of slaves, // you are returning a map from transfer size to the set of // address sets that are supported for that transfer size // find all the slaves that support a certain type of operation and then group their addresses by the supported size // for every size there could be multiple address ranges // safety is a trade off between checking between all possible addresses vs only the addresses // that are known to have supported sizes // the trade off is 'checking all addresses is a more expensive circuit but will always give you // the right answer even if you give it an illegal address' // the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer // fast presumes address legality // This 
groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies. // In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size. val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) => k -> vs.flatMap(_.address) } // safe produces a circuit that compares against all possible addresses, // whereas fast presumes that the address is legal but uses an efficient address decoder val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2)) // Simplified creates the most concise possible representation of each cases' address sets based on the mask. val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) } simplified.map { case (s, a) => // s is a size, you are checking for this size either the size of the operation is in s // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire. ((Some(s) == range).B || s.containsLg(lgSize)) && a.map(_.contains(address)).reduce(_||_) }.foldLeft(false.B)(_||_) } def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range) def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range) def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range) def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range) def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range) def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range) def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range) def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range) def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range) def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range) def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range) def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range) def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range) def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range) def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, 
_.supports.putPartial, address, lgSize, range) def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range) def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range) def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range) def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range) def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range) def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range) def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range) def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range) def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption def isTree = !slaves.exists(!_.isTree) def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString def v1copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = managers, channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } def v2copy( slaves: Seq[TLSlaveParameters] = slaves, channelBytes: TLChannelBeatBytes = channelBytes, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = slaves, channelBytes = channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } @deprecated("Use v1copy instead of copy","") def copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { v1copy( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } object TLSlavePortParameters { def v1( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { new TLSlavePortParameters( slaves = managers, channelBytes = TLChannelBeatBytes(beatBytes), endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } } object TLManagerPortParameters { @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","") def apply( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { 
TLSlavePortParameters.v1( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } class TLMasterParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], val name: String, val visibility: Seq[AddressSet], val unusedRegionTypes: Set[RegionType.T], val executesOnly: Boolean, val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C. val supports: TLSlaveToMasterTransferSizes, val emits: TLMasterToSlaveTransferSizes, val neverReleasesData: Boolean, val sourceId: IdRange) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters] override def productPrefix = "TLMasterParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 10 def productElement(n: Int): Any = n match { case 0 => name case 1 => sourceId case 2 => resources case 3 => visibility case 4 => unusedRegionTypes case 5 => executesOnly case 6 => requestFifo case 7 => supports case 8 => emits case 9 => neverReleasesData case _ => throw new IndexOutOfBoundsException(n.toString) } require (!sourceId.isEmpty) require (!visibility.isEmpty) require (supports.putFull.contains(supports.putPartial)) // We only support these operations if we support Probe (ie: we're a cache) require (supports.probe.contains(supports.arithmetic)) require (supports.probe.contains(supports.logical)) require (supports.probe.contains(supports.get)) require (supports.probe.contains(supports.putFull)) require (supports.probe.contains(supports.putPartial)) require (supports.probe.contains(supports.hint)) visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } val maxTransfer = List( supports.probe.max, supports.arithmetic.max, supports.logical.max, supports.get.max, supports.putFull.max, supports.putPartial.max).max def infoString = { s"""Master Name = ${name} |visibility = ${visibility} |emits = ${emits.infoString} |sourceId = ${sourceId} | |""".stripMargin } def v1copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { new TLMasterParameters( nodePath = nodePath, resources = this.resources, name = name, visibility = visibility, unusedRegionTypes = this.unusedRegionTypes, executesOnly = this.executesOnly, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = this.emits, neverReleasesData = this.neverReleasesData, sourceId = sourceId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: String = name, visibility: Seq[AddressSet] = visibility, unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes, executesOnly: Boolean = executesOnly, requestFifo: Boolean = requestFifo, supports: TLSlaveToMasterTransferSizes = supports, emits: TLMasterToSlaveTransferSizes = emits, neverReleasesData: Boolean = neverReleasesData, sourceId: IdRange = sourceId) = { new 
TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } @deprecated("Use v1copy instead of copy","") def copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { v1copy( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } object TLMasterParameters { def v1( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { new TLMasterParameters( nodePath = nodePath, resources = Nil, name = name, visibility = visibility, unusedRegionTypes = Set(), executesOnly = false, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData = false, sourceId = sourceId) } def v2( nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Nil, name: String, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), unusedRegionTypes: Set[RegionType.T] = Set(), executesOnly: Boolean = false, requestFifo: Boolean = false, supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports, emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData: Boolean = false, sourceId: IdRange = IdRange(0,1)) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } } object TLClientParameters { @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","") def apply( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet.everything), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: 
TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { TLMasterParameters.v1( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } class TLMasterPortParameters private( val masters: Seq[TLMasterParameters], val channelBytes: TLChannelBeatBytes, val minLatency: Int, val echoFields: Seq[BundleFieldBase], val requestFields: Seq[BundleFieldBase], val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters] override def productPrefix = "TLMasterPortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => masters case 1 => channelBytes case 2 => minLatency case 3 => echoFields case 4 => requestFields case 5 => responseKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!masters.isEmpty) require (minLatency >= 0) def clients = masters // Require disjoint ranges for Ids IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) => require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}") } // Bounds on required sizes def endSourceId = masters.map(_.sourceId.end).max def maxTransfer = masters.map(_.maxTransfer).max // The unused sources < endSourceId def unusedSources: Seq[Int] = { val usedSources = masters.map(_.sourceId).sortBy(_.start) ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) => end until start } } // Diplomatically determined operation sizes emitted by all inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = masters.map(_.emits).reduce( _ intersect _) // Diplomatically determined operation sizes Emitted by at least one inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all inward Masters // as opposed to supports* which generate circuitry to check which specific addresses val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _) val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _) val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _) val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _) val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _) val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _) val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _) // Diplomatically determined operation sizes supported by at least one master // as opposed to supports* which generate circuitry to check which specific addresses val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _) val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _) val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _) val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _) val anySupportPutFull = 
masters.map(!_.supports.putFull.none) .reduce(_ || _) val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _) val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _) // These return Option[TLMasterParameters] for your convenience def find(id: Int) = masters.find(_.sourceId.contains(id)) // Synthesizable lookup methods def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id))) def contains(id: UInt) = find(id).reduce(_ || _) def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B)) // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = { val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _) // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version; // the case where there is only one group. if (allSame) member(masters(0)).containsLg(lgSize) else { // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize Mux1H(find(id), masters.map(member(_).containsLg(lgSize))) } } // Check for support of a given operation at a specific id val supportsProbe = sourceIdHelper(_.supports.probe) _ val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _ val supportsLogical = sourceIdHelper(_.supports.logical) _ val supportsGet = sourceIdHelper(_.supports.get) _ val supportsPutFull = sourceIdHelper(_.supports.putFull) _ val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _ val supportsHint = sourceIdHelper(_.supports.hint) _ // TODO: Merge sourceIdHelper2 with sourceIdHelper private def sourceIdHelper2( member: TLMasterParameters => TransferSizes, sourceId: UInt, lgSize: UInt): Bool = { // Because sourceIds are uniquely owned by each master, we use them to group the // cases that have to be checked. 
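    // Illustrative example (assumed values, not part of the original source): two masters
    // with sourceId ranges [0,4) and [4,8) that both emit Get over TransferSizes(1, 64)
    // collapse into a single case:
    //   Seq(TransferSizes(1, 64) -> Seq(IdRange(0, 4), IdRange(4, 8)))
    // so only one containsLg comparison is generated and shared by both masters.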
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) => k -> vs.map(_.sourceId) } emitCases.map { case (s, a) => (s.containsLg(lgSize)) && a.map(_.contains(sourceId)).reduce(_||_) }.foldLeft(false.B)(_||_) } // Check for emit of a given operation at a specific id def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize) def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize) def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize) def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize) def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize) def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize) def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize) def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize) def infoString = masters.map(_.infoString).mkString def v1copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = clients, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2copy( masters: Seq[TLMasterParameters] = masters, channelBytes: TLChannelBeatBytes = channelBytes, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } @deprecated("Use v1copy instead of copy","") def copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { v1copy( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLClientPortParameters { @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","") def apply( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { TLMasterPortParameters.v1( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLMasterPortParameters { def v1( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = clients, channelBytes = TLChannelBeatBytes(), minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2( masters: Seq[TLMasterParameters], channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(), minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, 
minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } } case class TLBundleParameters( addressBits: Int, dataBits: Int, sourceBits: Int, sinkBits: Int, sizeBits: Int, echoFields: Seq[BundleFieldBase], requestFields: Seq[BundleFieldBase], responseFields: Seq[BundleFieldBase], hasBCE: Boolean) { // Chisel has issues with 0-width wires require (addressBits >= 1) require (dataBits >= 8) require (sourceBits >= 1) require (sinkBits >= 1) require (sizeBits >= 1) require (isPow2(dataBits)) echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") } val addrLoBits = log2Up(dataBits/8) // Used to uniquify bus IP names def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u") def union(x: TLBundleParameters) = TLBundleParameters( max(addressBits, x.addressBits), max(dataBits, x.dataBits), max(sourceBits, x.sourceBits), max(sinkBits, x.sinkBits), max(sizeBits, x.sizeBits), echoFields = BundleField.union(echoFields ++ x.echoFields), requestFields = BundleField.union(requestFields ++ x.requestFields), responseFields = BundleField.union(responseFields ++ x.responseFields), hasBCE || x.hasBCE) } object TLBundleParameters { val emptyBundleParams = TLBundleParameters( addressBits = 1, dataBits = 8, sourceBits = 1, sinkBits = 1, sizeBits = 1, echoFields = Nil, requestFields = Nil, responseFields = Nil, hasBCE = false) def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y)) def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) = new TLBundleParameters( addressBits = log2Up(slave.maxAddress + 1), dataBits = slave.beatBytes * 8, sourceBits = log2Up(master.endSourceId), sinkBits = log2Up(slave.endSinkId), sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1), echoFields = master.echoFields, requestFields = BundleField.accept(master.requestFields, slave.requestKeys), responseFields = BundleField.accept(slave.responseFields, master.responseKeys), hasBCE = master.anySupportProbe && slave.anySupportAcquireB) } case class TLEdgeParameters( master: TLMasterPortParameters, slave: TLSlavePortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { // legacy names: def manager = slave def client = master val maxTransfer = max(master.maxTransfer, slave.maxTransfer) val maxLgSize = log2Ceil(maxTransfer) // Sanity check the link... 
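  // Illustrative example (assumed values, not from the original source): with
  // slave.beatBytes = 16 every beat moves 16 bytes, so a link whose largest legal
  // transfer were only 8 bytes could never be expressed on this bus; the require
  // below rejects such a mismatched configuration at elaboration time.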
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})") def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims) val bundle = TLBundleParameters(master, slave) def formatEdge = master.infoString + "\n" + slave.infoString } case class TLCreditedDelay( a: CreditedDelay, b: CreditedDelay, c: CreditedDelay, d: CreditedDelay, e: CreditedDelay) { def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay( a = a + that.a, b = b + that.b, c = c + that.c, d = d + that.d, e = e + that.e) override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})" } object TLCreditedDelay { def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay) } case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString} case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val delay = client.delay + manager.delay val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters) case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base)) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } // To be unified, devices must agree on all of these terms case class ManagerUnificationKey( resources: Seq[Resource], regionType: RegionType.T, executable: Boolean, supportsAcquireT: TransferSizes, supportsAcquireB: TransferSizes, supportsArithmetic: TransferSizes, supportsLogical: TransferSizes, supportsGet: TransferSizes, supportsPutFull: TransferSizes, supportsPutPartial: TransferSizes, supportsHint: TransferSizes) object ManagerUnificationKey { def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey( resources = x.resources, regionType = x.regionType, executable = x.executable, supportsAcquireT = x.supportsAcquireT, supportsAcquireB = x.supportsAcquireB, supportsArithmetic = x.supportsArithmetic, supportsLogical = x.supportsLogical, supportsGet = x.supportsGet, supportsPutFull = x.supportsPutFull, supportsPutPartial = x.supportsPutPartial, supportsHint = x.supportsHint) } object 
ManagerUnification { def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = { slaves.groupBy(ManagerUnificationKey.apply).values.map { seq => val agree = seq.forall(_.fifoId == seq.head.fifoId) seq(0).v1copy( address = AddressSet.unify(seq.flatMap(_.address)), fifoId = if (agree) seq(0).fifoId else None) }.toList } } case class TLBufferParams( a: BufferParams = BufferParams.none, b: BufferParams = BufferParams.none, c: BufferParams = BufferParams.none, d: BufferParams = BufferParams.none, e: BufferParams = BufferParams.none ) extends DirectedBuffers[TLBufferParams] { def copyIn(x: BufferParams) = this.copy(b = x, d = x) def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x) def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x) } /** Pretty printing of TL source id maps */ class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] { private val tlDigits = String.valueOf(tl.endSourceId-1).length() protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s" private val sorted = tl.masters.sortBy(_.sourceId) val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c => TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo) } } case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean) extends IdMapEntry { val from = tlId val to = tlId val maxTransactionsInFlight = Some(tlId.size) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? 
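      // (Added note, not in the original source: if either of these answers is statically
      //  known, this function returns a constant Some(...) so hasData below can skip
      //  decoding the opcode at runtime.)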
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => 
a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
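  // (Puts, atomics and PREFETCH_WRITE hints always need T; Gets and PREFETCH_READ never do;
  //  Acquires need T exactly when they grow permissions to T, i.e. param is NtoT or BtoT.)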
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
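The firstlastHelper beat counter defined in TLEdge above is what becomes the a_first_counter/d_first_counter registers in the generated monitor below. As a rough standalone sketch of that idiom (a hypothetical BeatTracker module for illustration only, not one of the files above):

import chisel3._
import chisel3.util._

// Hypothetical sketch of the TLEdge.firstlastHelper beat-tracking idiom, for illustration only.
class BeatTracker(maxBeats: Int) extends Module {
  val io = IO(new Bundle {
    val fire   = Input(Bool())                    // ready && valid handshake on the channel
    val beats1 = Input(UInt(log2Up(maxBeats).W))  // beats in this message, minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Up(maxBeats).W)) // beats still outstanding after this one
  val counter1 = counter - 1.U
  io.first := counter === 0.U                     // not in the middle of a burst
  io.last  := counter === 1.U || io.beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) { counter := Mux(io.first, io.beats1, counter1) }
}

On a single-beat link like the one monitored here, beats1 is constant zero, so this collapses to the 1-bit a_first_counter/d_first_counter registers visible in TLMonitor_43 below.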
module TLMonitor_43( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [8:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [31:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [8:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [31:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [31:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_a_bits_source = 1'h0; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_source = 1'h0; // @[Monitor.scala:36:7] wire mask_sizeOH_shiftAmount = 1'h0; // @[OneHot.scala:64:49] wire mask_sub_size = 1'h0; // @[Misc.scala:209:26] wire _mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count = 1'h0; // @[Edges.scala:234:25] wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire 
_c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27] wire c_first_count = 1'h0; // @[Edges.scala:234:25] wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21] wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25] wire c_set = 1'h0; // @[Monitor.scala:738:34] wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire 
_c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire 
_same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire _source_ok_T = 1'h1; // @[Parameters.scala:46:9] wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31] wire mask_sub_sub_0_1 = 1'h1; // @[Misc.scala:206:21] wire mask_sub_0_1 = 1'h1; // @[Misc.scala:215:29] wire mask_sub_1_1 = 1'h1; // @[Misc.scala:215:29] wire mask_size = 1'h1; // @[Misc.scala:209:26] wire mask_acc = 1'h1; // @[Misc.scala:215:29] wire mask_acc_1 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_2 = 1'h1; // @[Misc.scala:215:29] wire mask_acc_3 = 1'h1; // @[Misc.scala:215:29] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:46:9] wire _source_ok_WIRE_1_0 = 1'h1; // @[Parameters.scala:1138:31] wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire d_first_last = 1'h1; // @[Edges.scala:232:33] wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire d_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire _same_cycle_resp_T_2 = 1'h1; // @[Monitor.scala:684:113] wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43] wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33] wire _same_cycle_resp_T_8 = 1'h1; // @[Monitor.scala:795:113] wire [1:0] is_aligned_mask = 2'h3; // @[package.scala:243:46] wire [1:0] mask_lo = 2'h3; // @[Misc.scala:222:10] wire [1:0] mask_hi = 2'h3; // @[Misc.scala:222:10] wire [1:0] _a_first_beats1_decode_T_2 = 2'h3; // @[package.scala:243:46] wire [1:0] _a_first_beats1_decode_T_5 = 2'h3; // @[package.scala:243:46] wire [1:0] _c_first_beats1_decode_T_1 = 2'h3; // @[package.scala:243:76] wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28] wire [1:0] io_in_a_bits_size = 2'h2; // @[Monitor.scala:36:7] wire [1:0] _mask_sizeOH_T = 2'h2; // @[Misc.scala:202:34] wire [2:0] io_in_a_bits_param = 3'h0; // @[Monitor.scala:36:7] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 
3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [3:0] io_in_a_bits_mask = 4'hF; // @[Monitor.scala:36:7] wire [3:0] mask = 4'hF; // @[Misc.scala:222:10] wire [31:0] _c_first_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_first_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_first_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_wo_ready_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_wo_ready_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_interm_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_interm_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_opcodes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_opcodes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_sizes_set_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_sizes_set_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _c_probe_ack_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _c_probe_ack_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_1_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_2_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_3_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _same_cycle_resp_WIRE_4_bits_data = 32'h0; // @[Bundles.scala:265:74] wire [31:0] _same_cycle_resp_WIRE_5_bits_data = 32'h0; // @[Bundles.scala:265:61] wire [8:0] _c_first_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_first_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_first_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_first_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_set_wo_ready_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_set_wo_ready_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_opcodes_set_interm_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_opcodes_set_interm_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] 
_c_sizes_set_interm_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_sizes_set_interm_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_opcodes_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_opcodes_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_sizes_set_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_sizes_set_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_probe_ack_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_probe_ack_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_probe_ack_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_probe_ack_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_1_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_2_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_3_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_4_bits_address = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_5_bits_address = 9'h0; // @[Bundles.scala:265:61] wire [1:0] _is_aligned_mask_T_1 = 2'h0; // @[package.scala:243:76] wire [1:0] _a_first_beats1_decode_T_1 = 2'h0; // @[package.scala:243:76] wire [1:0] _a_first_beats1_decode_T_4 = 2'h0; // @[package.scala:243:76] wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_first_beats1_decode_T_2 = 2'h0; // @[package.scala:243:46] wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // 
@[Bundles.scala:265:61] wire [30:0] _d_opcodes_clr_T_5 = 31'hF; // @[Monitor.scala:680:76] wire [30:0] _d_sizes_clr_T_5 = 31'hF; // @[Monitor.scala:681:74] wire [30:0] _d_opcodes_clr_T_11 = 31'hF; // @[Monitor.scala:790:76] wire [30:0] _d_sizes_clr_T_11 = 31'hF; // @[Monitor.scala:791:74] wire [3:0] _a_opcode_lookup_T = 4'h0; // @[Monitor.scala:637:69] wire [3:0] _a_size_lookup_T = 4'h0; // @[Monitor.scala:641:65] wire [3:0] _a_opcodes_set_T = 4'h0; // @[Monitor.scala:659:79] wire [3:0] _a_sizes_set_T = 4'h0; // @[Monitor.scala:660:77] wire [3:0] _d_opcodes_clr_T_4 = 4'h0; // @[Monitor.scala:680:101] wire [3:0] _d_sizes_clr_T_4 = 4'h0; // @[Monitor.scala:681:99] wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34] wire [3:0] c_sizes_set = 4'h0; // @[Monitor.scala:741:34] wire [3:0] _c_opcode_lookup_T = 4'h0; // @[Monitor.scala:749:69] wire [3:0] _c_size_lookup_T = 4'h0; // @[Monitor.scala:750:67] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79] wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77] wire [3:0] _d_opcodes_clr_T_10 = 4'h0; // @[Monitor.scala:790:101] wire [3:0] _d_sizes_clr_T_10 = 4'h0; // @[Monitor.scala:791:99] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [1:0] _mask_sizeOH_T_1 = 2'h1; // @[OneHot.scala:65:12] wire [1:0] _mask_sizeOH_T_2 = 2'h1; // @[OneHot.scala:65:27] wire [1:0] mask_sizeOH = 2'h1; // @[Misc.scala:202:81] wire [1:0] _a_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _a_set_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T_1 = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T_1 = 2'h1; // 
@[OneHot.scala:58:35] wire [17:0] _c_sizes_set_T_1 = 18'h0; // @[Monitor.scala:768:52] wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [4:0] _c_first_beats1_decode_T = 5'h3; // @[package.scala:243:71] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] _a_sizes_set_interm_T_1 = 3'h5; // @[Monitor.scala:658:59] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] _a_sizes_set_interm_T = 3'h4; // @[Monitor.scala:658:51] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [4:0] _is_aligned_mask_T = 5'hC; // @[package.scala:243:71] wire [4:0] _a_first_beats1_decode_T = 5'hC; // @[package.scala:243:71] wire [4:0] _a_first_beats1_decode_T_3 = 5'hC; // @[package.scala:243:71] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [8:0] _is_aligned_T = {7'h0, io_in_a_bits_address_0[1:0]}; // @[Monitor.scala:36:7] wire is_aligned = _is_aligned_T == 9'h0; // @[Edges.scala:21:{16,24}] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_1_2 = mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_eq; // @[Misc.scala:214:27, :215:38] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_eq_1; // @[Misc.scala:214:27, :215:38] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_eq_2; // @[Misc.scala:214:27, :215:38] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_eq_3; // @[Misc.scala:214:27, :215:38] wire _T_598 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_598; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_598; // @[Decoupled.scala:51:35] wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // 
@[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] reg a_first_counter; // @[Edges.scala:229:27] wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28] wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [8:0] address; // @[Monitor.scala:391:22] wire _T_666 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_666; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_666; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_666; // @[Decoupled.scala:51:35] wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35] wire [4:0] _GEN = 5'h3 << io_in_d_bits_size_0; // @[package.scala:243:71] wire [4:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [4:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [4:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN; // @[package.scala:243:71] wire [1:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[1:0]; // @[package.scala:243:{71,76}] wire [1:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] reg d_first_counter; // @[Edges.scala:229:27] wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28] wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [1:0] size_1; // @[Monitor.scala:540:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [1:0] inflight; // @[Monitor.scala:614:27] reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35] wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes; // @[Monitor.scala:616:35, :637:44] reg [3:0] inflight_sizes; // @[Monitor.scala:618:33] wire [3:0] _a_size_lookup_T_1 = inflight_sizes; // @[Monitor.scala:618:33, :641:40] wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] reg a_first_counter_1; // @[Edges.scala:229:27] wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] 
_a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35] wire [1:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[1:0]; // @[package.scala:243:{71,76}] wire [1:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] reg d_first_counter_1; // @[Edges.scala:229:27] wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire a_set; // @[Monitor.scala:626:34] wire a_set_wo_ready; // @[Monitor.scala:627:34] wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [3:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}] wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [15:0] _a_size_lookup_T_6 = {12'h0, _a_size_lookup_T_1}; // @[Monitor.scala:637:97, :641:{40,91}] wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _T_528 = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26] assign a_set_wo_ready = _T_528; // @[Monitor.scala:627:34, :651:26] wire _same_cycle_resp_T; // @[Monitor.scala:684:44] assign _same_cycle_resp_T = _T_528; // @[Monitor.scala:651:26, :684:44] assign a_set = _T_598 & a_first_1; // @[Decoupled.scala:51:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = a_set ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:626:34, :646:40, :655:70, :657:{28,61}] assign a_sizes_set_interm = a_set ? 3'h5 : 3'h0; // @[Monitor.scala:626:34, :648:38, :655:70, :658:28] wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm}; // @[Monitor.scala:646:40, :659:54] assign a_opcodes_set = a_set ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :630:33, :655:70, :659:{28,54}] wire [17:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm}; // @[Monitor.scala:648:38, :659:54, :660:52] assign a_sizes_set = a_set ? 
_a_sizes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :632:31, :655:70, :660:{28,52}] wire d_clr; // @[Monitor.scala:664:34] wire d_clr_wo_ready; // @[Monitor.scala:665:34] wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [3:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_0 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_0; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_0; // @[Monitor.scala:673:46, :783:46] wire _T_577 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] assign d_clr_wo_ready = _T_577 & ~d_release_ack; // @[Monitor.scala:665:34, :673:46, :674:{26,71,74}] assign d_clr = _T_666 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] wire [3:0] _GEN_1 = {4{d_clr}}; // @[Monitor.scala:664:34, :668:33, :678:89, :680:21] assign d_opcodes_clr = _GEN_1; // @[Monitor.scala:668:33, :678:89, :680:21] assign d_sizes_clr = _GEN_1; // @[Monitor.scala:668:33, :670:31, :678:89, :680:21] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire same_cycle_resp = _same_cycle_resp_T_1; // @[Monitor.scala:684:{55,88}] wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27] wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}] wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [3:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [3:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [3:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [1:0] inflight_1; // @[Monitor.scala:726:35] wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1; // @[Monitor.scala:727:35, :749:44] wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [3:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [3:0] _c_size_lookup_T_1 = inflight_sizes_1; // @[Monitor.scala:728:35, :750:42] wire [3:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35] wire [1:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[1:0]; // @[package.scala:243:{71,76}] wire [1:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] reg d_first_counter_2; // @[Edges.scala:229:27] wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28] wire d_first_2 = ~d_first_counter_2; // 
@[Edges.scala:229:27, :231:25] wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:637:97, :749:{44,97}] wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [15:0] _c_size_lookup_T_6 = {12'h0, _c_size_lookup_T_1}; // @[Monitor.scala:637:97, :750:{42,93}] wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire d_clr_1; // @[Monitor.scala:774:34] wire d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [3:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_642 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_642 & d_release_ack_1; // @[Monitor.scala:775:34, :783:46, :784:{26,71}] assign d_clr_1 = _T_666 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] wire [3:0] _GEN_2 = {4{d_clr_1}}; // @[Monitor.scala:774:34, :776:34, :788:88, :790:21] assign d_opcodes_clr_1 = _GEN_2; // @[Monitor.scala:776:34, :788:88, :790:21] assign d_sizes_clr_1 = _GEN_2; // @[Monitor.scala:776:34, :777:34, :788:88, :790:21] wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}] wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [3:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [3:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
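The wires and registers above implement the monitor's in-flight bookkeeping: a per-source bit is set when an A-channel beat is accepted, cleared when the matching D-channel response arrives, and a watchdog counter tracks cycles without progress. The fragment below is a minimal, self-contained Chisel sketch of that pattern only; the module name, ports, and the simplified watchdog-reset condition are illustrative assumptions, not part of the generated Monitor above.

import chisel3._
import chisel3.util._

// Minimal sketch of request/response in-flight tracking (illustrative only).
class InflightTrackerSketch(sources: Int) extends Module {
  require(sources >= 2)
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())                     // A-channel beat accepted this cycle
    val a_source = Input(UInt(log2Ceil(sources).W))  // source id of the request
    val d_fire   = Input(Bool())                     // D-channel beat accepted this cycle
    val d_source = Input(UInt(log2Ceil(sources).W))  // source id of the response
    val stuck    = Output(Bool())                    // watchdog saturated: no forward progress
  })
  val inflight = RegInit(0.U(sources.W))
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, sources), 0.U(sources.W))
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, sources), 0.U(sources.W))
  inflight := (inflight | a_set) & ~d_clr
  // A response must match an outstanding (or same-cycle) request.
  assert(!io.d_fire || ((inflight | a_set) & d_clr).orR,
    "D-channel response without a matching in-flight request")
  val watchdog = RegInit(0.U(32.W))
  watchdog := Mux(io.a_fire || io.d_fire, 0.U, watchdog +% 1.U)
  io.stuck := watchdog.andR
}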
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File consts.scala: //****************************************************************************** // Copyright (c) 2011 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Constants //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common.constants import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util.Str import freechips.rocketchip.rocket.RVCExpander /** * Mixin for issue queue types */ trait IQType { val IQT_SZ = 3 val IQT_INT = 1.U(IQT_SZ.W) val IQT_MEM = 2.U(IQT_SZ.W) val IQT_FP = 4.U(IQT_SZ.W) val IQT_MFP = 6.U(IQT_SZ.W) } /** * Mixin for scalar operation constants */ trait ScalarOpConstants { val X = BitPat("b?") val Y = BitPat("b1") val N = BitPat("b0") //************************************ // Extra Constants // Which branch predictor predicted us val BSRC_SZ = 2 val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution //************************************ // Control Signals // CFI types val CFI_SZ = 3 val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction val CFI_BR = 1.U(CFI_SZ.W) // Branch val CFI_JAL = 2.U(CFI_SZ.W) // JAL val CFI_JALR = 3.U(CFI_SZ.W) // JALR // PC Select Signal val PC_PLUS4 = 0.U(2.W) // PC + 4 val PC_BRJMP = 1.U(2.W) // brjmp_target val PC_JALR = 2.U(2.W) // jump_reg_target // Branch Type val BR_N = 0.U(4.W) // Next val BR_NE = 1.U(4.W) // Branch on NotEqual val BR_EQ = 2.U(4.W) // Branch on Equal val BR_GE = 3.U(4.W) // Branch on Greater/Equal val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned val BR_LT = 5.U(4.W) // Branch on Less Than val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned val BR_J = 7.U(4.W) // Jump val BR_JR = 8.U(4.W) // Jump Register // RS1 Operand Select Signal val OP1_RS1 = 0.U(2.W) // Register Source #1 val OP1_ZERO= 1.U(2.W) val OP1_PC = 2.U(2.W) val OP1_X = BitPat("b??") // RS2 Operand Select Signal val OP2_RS2 = 0.U(3.W) // Register Source #2 val OP2_IMM = 1.U(3.W) // immediate val OP2_ZERO= 2.U(3.W) // constant 0 val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4) val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1 val OP2_X = BitPat("b???") // Register File Write Enable Signal val REN_0 = false.B val REN_1 = true.B // Is 32b Word or 64b Doubldword? 
val SZ_DW = 1 val DW_X = true.B // Bool(xLen==64) val DW_32 = false.B val DW_64 = true.B val DW_XPR = true.B // Bool(xLen==64) // Memory Enable Signal val MEN_0 = false.B val MEN_1 = true.B val MEN_X = false.B // Immediate Extend Select val IS_I = 0.U(3.W) // I-Type (LD,ALU) val IS_S = 1.U(3.W) // S-Type (ST) val IS_B = 2.U(3.W) // SB-Type (BR) val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC) val IS_J = 4.U(3.W) // UJ-Type (J/JAL) val IS_X = BitPat("b???") // Decode Stage Control Signals val RT_FIX = 0.U(2.W) val RT_FLT = 1.U(2.W) val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc) val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.) // TODO rename RT_NAR // Micro-op opcodes // TODO change micro-op opcodes into using enum val UOPC_SZ = 7 val uopX = BitPat.dontCare(UOPC_SZ) val uopNOP = 0.U(UOPC_SZ.W) val uopLD = 1.U(UOPC_SZ.W) val uopSTA = 2.U(UOPC_SZ.W) // store address generation val uopSTD = 3.U(UOPC_SZ.W) // store data generation val uopLUI = 4.U(UOPC_SZ.W) val uopADDI = 5.U(UOPC_SZ.W) val uopANDI = 6.U(UOPC_SZ.W) val uopORI = 7.U(UOPC_SZ.W) val uopXORI = 8.U(UOPC_SZ.W) val uopSLTI = 9.U(UOPC_SZ.W) val uopSLTIU= 10.U(UOPC_SZ.W) val uopSLLI = 11.U(UOPC_SZ.W) val uopSRAI = 12.U(UOPC_SZ.W) val uopSRLI = 13.U(UOPC_SZ.W) val uopSLL = 14.U(UOPC_SZ.W) val uopADD = 15.U(UOPC_SZ.W) val uopSUB = 16.U(UOPC_SZ.W) val uopSLT = 17.U(UOPC_SZ.W) val uopSLTU = 18.U(UOPC_SZ.W) val uopAND = 19.U(UOPC_SZ.W) val uopOR = 20.U(UOPC_SZ.W) val uopXOR = 21.U(UOPC_SZ.W) val uopSRA = 22.U(UOPC_SZ.W) val uopSRL = 23.U(UOPC_SZ.W) val uopBEQ = 24.U(UOPC_SZ.W) val uopBNE = 25.U(UOPC_SZ.W) val uopBGE = 26.U(UOPC_SZ.W) val uopBGEU = 27.U(UOPC_SZ.W) val uopBLT = 28.U(UOPC_SZ.W) val uopBLTU = 29.U(UOPC_SZ.W) val uopCSRRW= 30.U(UOPC_SZ.W) val uopCSRRS= 31.U(UOPC_SZ.W) val uopCSRRC= 32.U(UOPC_SZ.W) val uopCSRRWI=33.U(UOPC_SZ.W) val uopCSRRSI=34.U(UOPC_SZ.W) val uopCSRRCI=35.U(UOPC_SZ.W) val uopJ = 36.U(UOPC_SZ.W) val uopJAL = 37.U(UOPC_SZ.W) val uopJALR = 38.U(UOPC_SZ.W) val uopAUIPC= 39.U(UOPC_SZ.W) //val uopSRET = 40.U(UOPC_SZ.W) val uopCFLSH= 41.U(UOPC_SZ.W) val uopFENCE= 42.U(UOPC_SZ.W) val uopADDIW= 43.U(UOPC_SZ.W) val uopADDW = 44.U(UOPC_SZ.W) val uopSUBW = 45.U(UOPC_SZ.W) val uopSLLIW= 46.U(UOPC_SZ.W) val uopSLLW = 47.U(UOPC_SZ.W) val uopSRAIW= 48.U(UOPC_SZ.W) val uopSRAW = 49.U(UOPC_SZ.W) val uopSRLIW= 50.U(UOPC_SZ.W) val uopSRLW = 51.U(UOPC_SZ.W) val uopMUL = 52.U(UOPC_SZ.W) val uopMULH = 53.U(UOPC_SZ.W) val uopMULHU= 54.U(UOPC_SZ.W) val uopMULHSU=55.U(UOPC_SZ.W) val uopMULW = 56.U(UOPC_SZ.W) val uopDIV = 57.U(UOPC_SZ.W) val uopDIVU = 58.U(UOPC_SZ.W) val uopREM = 59.U(UOPC_SZ.W) val uopREMU = 60.U(UOPC_SZ.W) val uopDIVW = 61.U(UOPC_SZ.W) val uopDIVUW= 62.U(UOPC_SZ.W) val uopREMW = 63.U(UOPC_SZ.W) val uopREMUW= 64.U(UOPC_SZ.W) val uopFENCEI = 65.U(UOPC_SZ.W) // = 66.U(UOPC_SZ.W) val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen) val uopFMV_W_X = 68.U(UOPC_SZ.W) val uopFMV_D_X = 69.U(UOPC_SZ.W) val uopFMV_X_W = 70.U(UOPC_SZ.W) val uopFMV_X_D = 71.U(UOPC_SZ.W) val uopFSGNJ_S = 72.U(UOPC_SZ.W) val uopFSGNJ_D = 73.U(UOPC_SZ.W) val uopFCVT_S_D = 74.U(UOPC_SZ.W) val uopFCVT_D_S = 75.U(UOPC_SZ.W) val uopFCVT_S_X = 76.U(UOPC_SZ.W) val uopFCVT_D_X = 77.U(UOPC_SZ.W) val uopFCVT_X_S = 78.U(UOPC_SZ.W) val uopFCVT_X_D = 79.U(UOPC_SZ.W) val uopCMPR_S = 80.U(UOPC_SZ.W) val uopCMPR_D = 81.U(UOPC_SZ.W) val uopFCLASS_S = 82.U(UOPC_SZ.W) val uopFCLASS_D = 83.U(UOPC_SZ.W) val uopFMINMAX_S = 84.U(UOPC_SZ.W) val uopFMINMAX_D = 85.U(UOPC_SZ.W) // = 86.U(UOPC_SZ.W) val uopFADD_S = 
87.U(UOPC_SZ.W) val uopFSUB_S = 88.U(UOPC_SZ.W) val uopFMUL_S = 89.U(UOPC_SZ.W) val uopFADD_D = 90.U(UOPC_SZ.W) val uopFSUB_D = 91.U(UOPC_SZ.W) val uopFMUL_D = 92.U(UOPC_SZ.W) val uopFMADD_S = 93.U(UOPC_SZ.W) val uopFMSUB_S = 94.U(UOPC_SZ.W) val uopFNMADD_S = 95.U(UOPC_SZ.W) val uopFNMSUB_S = 96.U(UOPC_SZ.W) val uopFMADD_D = 97.U(UOPC_SZ.W) val uopFMSUB_D = 98.U(UOPC_SZ.W) val uopFNMADD_D = 99.U(UOPC_SZ.W) val uopFNMSUB_D = 100.U(UOPC_SZ.W) val uopFDIV_S = 101.U(UOPC_SZ.W) val uopFDIV_D = 102.U(UOPC_SZ.W) val uopFSQRT_S = 103.U(UOPC_SZ.W) val uopFSQRT_D = 104.U(UOPC_SZ.W) val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET val uopSFENCE = 107.U(UOPC_SZ.W) val uopROCC = 108.U(UOPC_SZ.W) val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2" // The Bubble Instruction (Machine generated NOP) // Insert (XOR x0,x0,x0) which is different from software compiler // generated NOPs which are (ADDI x0, x0, 0). // Reasoning for this is to let visualizers and stat-trackers differentiate // between software NOPs and machine-generated Bubbles in the pipeline. val BUBBLE = (0x4033).U(32.W) def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = { val uop = Wire(new boom.v3.common.MicroOp) uop := DontCare // Overridden in the following lines uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior uop.bypassable := false.B uop.fp_val := false.B uop.uses_stq := false.B uop.uses_ldq := false.B uop.pdst := 0.U uop.dst_rtype := RT_X val cs = Wire(new boom.v3.common.CtrlSignals()) cs := DontCare // Overridden in the following lines cs.br_type := BR_N cs.csr_cmd := freechips.rocketchip.rocket.CSR.N cs.is_load := false.B cs.is_sta := false.B cs.is_std := false.B uop.ctrl := cs uop } } /** * Mixin for RISCV constants */ trait RISCVConstants { // abstract out instruction decode magic numbers val RD_MSB = 11 val RD_LSB = 7 val RS1_MSB = 19 val RS1_LSB = 15 val RS2_MSB = 24 val RS2_LSB = 20 val RS3_MSB = 31 val RS3_LSB = 27 val CSR_ADDR_MSB = 31 val CSR_ADDR_LSB = 20 val CSR_ADDR_SZ = 12 // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.) val SHAMT_5_BIT = 25 val LONGEST_IMM_SZ = 20 val X0 = 0.U val RA = 1.U // return address register // memory consistency model // The C/C++ atomics MCM requires that two loads to the same address maintain program order. // The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior). 
val MCM_ORDER_DEPENDENT_LOADS = true val jal_opc = (0x6f).U val jalr_opc = (0x67).U def GetUop(inst: UInt): UInt = inst(6,0) def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB) def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB) def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = { val rvc_exp = Module(new RVCExpander) rvc_exp.io.in := inst Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst) } // Note: Accepts only EXPANDED rvc instructions def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) ((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) ((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = { val bdecode = Module(new boom.v3.exu.BranchDecode) bdecode.io.inst := inst bdecode.io.pc := 0.U bdecode.io.out.cfi_type } } /** * Mixin for exception cause constants */ trait ExcCauseConstants { // a memory disambigious misspeculation occurred val MINI_EXCEPTION_MEM_ORDERING = 16.U val MINI_EXCEPTION_CSR_REPLAY = 17.U require (!freechips.rocketchip.rocket.Causes.all.contains(16)) require (!freechips.rocketchip.rocket.Causes.all.contains(17)) } File issue-slot.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Issue Slot Logic //-------------------------------------------------------------------------- //------------------------------------------------------------------------------ // // Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot. // TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores. // TODO Disable ldspec for FP queue. package boom.v3.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.common._ import boom.v3.util._ import FUConstants._ /** * IO bundle to interact with Issue slot * * @param numWakeupPorts number of wakeup ports for the slot */ class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle { val valid = Output(Bool()) val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitely? val request = Output(Bool()) val request_hp = Output(Bool()) val grant = Input(Bool()) val brupdate = Input(new BrUpdateInfo()) val kill = Input(Bool()) // pipeline flush val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant) val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted. 
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz)))) val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W))) val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W)))) val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry! val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collasping queue. val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued. val debug = { val result = new Bundle { val p1 = Bool() val p2 = Bool() val p3 = Bool() val ppred = Bool() val state = UInt(width=2.W) } Output(result) } } /** * Single issue slot. Holds a uop within the issue queue * * @param numWakeupPorts number of wakeup ports */ class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomModule with IssueUnitConstants { val io = IO(new IssueSlotIO(numWakeupPorts)) // slot invalid? // slot is valid, holding 1 uop // slot is valid, holds 2 uops (like a store) def is_invalid = state === s_invalid def is_valid = state =/= s_invalid val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot) val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot) val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot) val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot) val state = RegInit(s_invalid) val p1 = RegInit(false.B) val p2 = RegInit(false.B) val p3 = RegInit(false.B) val ppred = RegInit(false.B) // Poison if woken up by speculative load. // Poison lasts 1 cycle (as ldMiss will come on the next cycle). // SO if poisoned is true, set it to false! val p1_poisoned = RegInit(false.B) val p2_poisoned = RegInit(false.B) p1_poisoned := false.B p2_poisoned := false.B val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned) val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned) val slot_uop = RegInit(NullMicroOp) val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop) //----------------------------------------------------------------------------- // next slot state computation // compute the next state for THIS entry slot (in a collasping queue, the // current uop may get moved elsewhere, and a new uop can enter when (io.kill) { state := s_invalid } .elsewhen (io.in_uop.valid) { state := io.in_uop.bits.iw_state } .elsewhen (io.clear) { state := s_invalid } .otherwise { state := next_state } //----------------------------------------------------------------------------- // "update" state // compute the next state for the micro-op in this slot. This micro-op may // be moved elsewhere, so the "next_state" travels with it. // defaults next_state := state next_uopc := slot_uop.uopc next_lrs1_rtype := slot_uop.lrs1_rtype next_lrs2_rtype := slot_uop.lrs2_rtype when (io.kill) { next_state := s_invalid } .elsewhen ((io.grant && (state === s_valid_1)) || (io.grant && (state === s_valid_2) && p1 && p2 && ppred)) { // try to issue this uop. 
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) { next_state := s_invalid } } .elsewhen (io.grant && (state === s_valid_2)) { when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) { next_state := s_valid_1 when (p1) { slot_uop.uopc := uopSTD next_uopc := uopSTD slot_uop.lrs1_rtype := RT_X next_lrs1_rtype := RT_X } .otherwise { slot_uop.lrs2_rtype := RT_X next_lrs2_rtype := RT_X } } } when (io.in_uop.valid) { slot_uop := io.in_uop.bits assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.") } // Wakeup Compare Logic // these signals are the "next_p*" for the current slot's micro-op. // they are important for shifting the current slot_uop up to an other entry. val next_p1 = WireInit(p1) val next_p2 = WireInit(p2) val next_p3 = WireInit(p3) val next_ppred = WireInit(ppred) when (io.in_uop.valid) { p1 := !(io.in_uop.bits.prs1_busy) p2 := !(io.in_uop.bits.prs2_busy) p3 := !(io.in_uop.bits.prs3_busy) ppred := !(io.in_uop.bits.ppred_busy) } when (io.ldspec_miss && next_p1_poisoned) { assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!") p1 := false.B } when (io.ldspec_miss && next_p2_poisoned) { assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!") p2 := false.B } for (i <- 0 until numWakeupPorts) { when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs1)) { p1 := true.B } when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs2)) { p2 := true.B } when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs3)) { p3 := true.B } } when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) { ppred := true.B } for (w <- 0 until memWidth) { assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U), "Loads to x0 should never speculatively wakeup other instructions") } // TODO disable if FP IQ. for (w <- 0 until memWidth) { when (io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === next_uop.prs1 && next_uop.lrs1_rtype === RT_FIX) { p1 := true.B p1_poisoned := true.B assert (!next_p1_poisoned) } when (io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === next_uop.prs2 && next_uop.lrs2_rtype === RT_FIX) { p2 := true.B p2_poisoned := true.B assert (!next_p2_poisoned) } } // Handle branch misspeculations val next_br_mask = GetNewBrMask(io.brupdate, slot_uop) // was this micro-op killed by a branch? if yes, we can't let it be valid if // we compact it into an other entry when (IsKilledByBranch(io.brupdate, slot_uop)) { next_state := s_invalid } when (!io.in_uop.valid) { slot_uop.br_mask := next_br_mask } //------------------------------------------------------------- // Request Logic io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr io.request_hp := io.request && high_priority when (state === s_valid_1) { io.request := p1 && p2 && p3 && ppred && !io.kill } .elsewhen (state === s_valid_2) { io.request := (p1 || p2) && ppred && !io.kill } .otherwise { io.request := false.B } //assign outputs io.valid := is_valid io.uop := slot_uop io.uop.iw_p1_poisoned := p1_poisoned io.uop.iw_p2_poisoned := p2_poisoned // micro-op will vacate due to grant. 
val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred) val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned) io.will_be_valid := is_valid && !(may_vacate && !squash_grant) io.out_uop := slot_uop io.out_uop.iw_state := next_state io.out_uop.uopc := next_uopc io.out_uop.lrs1_rtype := next_lrs1_rtype io.out_uop.lrs2_rtype := next_lrs2_rtype io.out_uop.br_mask := next_br_mask io.out_uop.prs1_busy := !p1 io.out_uop.prs2_busy := !p2 io.out_uop.prs3_busy := !p3 io.out_uop.ppred_busy := !ppred io.out_uop.iw_p1_poisoned := p1_poisoned io.out_uop.iw_p2_poisoned := p2_poisoned when (state === s_valid_2) { when (p1 && p2 && ppred) { ; // send out the entire instruction as one uop } .elsewhen (p1 && ppred) { io.uop.uopc := slot_uop.uopc io.uop.lrs2_rtype := RT_X } .elsewhen (p2 && ppred) { io.uop.uopc := uopSTD io.uop.lrs1_rtype := RT_X } } // debug outputs io.debug.p1 := p1 io.debug.p2 := p2 io.debug.p3 := p3 io.debug.ppred := ppred io.debug.state := state }
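Before the generated Verilog that follows, a compact stand-alone sketch of the wakeup-compare pattern the slot uses: each valid wakeup port CAM-matches its destination tag against the slot's source tags and latches the corresponding operand-ready bit. Everything here (module and port names, the two-operand simplification) is an illustrative assumption, not part of issue-slot.scala or of the generated IssueSlot_57 below.

import chisel3._
import chisel3.util._

// Illustrative two-operand version of the slot's wakeup-compare idea.
class WakeupCompareSketch(numWakeupPorts: Int, pregSz: Int) extends Module {
  val io = IO(new Bundle {
    val prs1    = Input(UInt(pregSz.W))                              // first source physical register
    val prs2    = Input(UInt(pregSz.W))                              // second source physical register
    val wakeups = Flipped(Vec(numWakeupPorts, Valid(UInt(pregSz.W)))) // broadcast destination tags
    val request = Output(Bool())                                      // both operands ready
  })
  val p1 = RegInit(false.B) // operand-1 ready
  val p2 = RegInit(false.B) // operand-2 ready
  for (w <- io.wakeups) {
    when (w.valid && w.bits === io.prs1) { p1 := true.B }
    when (w.valid && w.bits === io.prs2) { p2 := true.B }
  }
  io.request := p1 && p2
}

In the actual slot above, the comparisons are made against next_uop (so wakeups are not lost when a uop shifts to another slot in the collapsing queue), and the ready bits can also be cleared again when a speculative load wakeup turns out to have missed (ldspec_miss with the poison bits).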
module IssueSlot_57( // @[issue-slot.scala:69:7] input clock, // @[issue-slot.scala:69:7] input reset, // @[issue-slot.scala:69:7] output io_valid, // @[issue-slot.scala:73:14] output io_will_be_valid, // @[issue-slot.scala:73:14] output io_request, // @[issue-slot.scala:73:14] output io_request_hp, // @[issue-slot.scala:73:14] input io_grant, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_uopc, // @[issue-slot.scala:73:14] input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:73:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:73:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[issue-slot.scala:73:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_load, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_std, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_br, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_jalr, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_jal, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_taken, // @[issue-slot.scala:73:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:73:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ppred_busy, // 
@[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_exception, // @[issue-slot.scala:73:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bypassable, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ldst_val, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_fp_single, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:73:14] input io_brupdate_b2_valid, // @[issue-slot.scala:73:14] input io_brupdate_b2_mispredict, // @[issue-slot.scala:73:14] input io_brupdate_b2_taken, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:73:14] input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:73:14] input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:73:14] input io_kill, // @[issue-slot.scala:73:14] input io_clear, // @[issue-slot.scala:73:14] input io_ldspec_miss, // @[issue-slot.scala:73:14] input io_wakeup_ports_0_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_0_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_0_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_1_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_1_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_1_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_2_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_2_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_2_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_3_valid, // @[issue-slot.scala:73:14] input 
[6:0] io_wakeup_ports_3_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_3_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_4_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_4_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_4_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_5_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_5_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_5_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_6_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_6_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_6_bits_poisoned, // @[issue-slot.scala:73:14] input io_spec_ld_wakeup_0_valid, // @[issue-slot.scala:73:14] input [6:0] io_spec_ld_wakeup_0_bits, // @[issue-slot.scala:73:14] input io_in_uop_valid, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_uopc, // @[issue-slot.scala:73:14] input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:73:14] input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_rvc, // @[issue-slot.scala:73:14] input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_iq_type, // @[issue-slot.scala:73:14] input [9:0] io_in_uop_bits_fu_code, // @[issue-slot.scala:73:14] input [3:0] io_in_uop_bits_ctrl_br_type, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_ctrl_op1_sel, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_op2_sel, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_imm_sel, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ctrl_op_fcn, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_fcn_dw, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_csr_cmd, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_load, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_sta, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_std, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_iw_state, // @[issue-slot.scala:73:14] input io_in_uop_bits_iw_p1_poisoned, // @[issue-slot.scala:73:14] input io_in_uop_bits_iw_p2_poisoned, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_br, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_jalr, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_jal, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_sfb, // @[issue-slot.scala:73:14] input [15:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:73:14] input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:73:14] input io_in_uop_bits_edge_inst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:73:14] input io_in_uop_bits_taken, // @[issue-slot.scala:73:14] input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:73:14] input [11:0] io_in_uop_bits_csr_addr, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:73:14] input 
io_in_uop_bits_prs1_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:73:14] input io_in_uop_bits_exception, // @[issue-slot.scala:73:14] input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:73:14] input io_in_uop_bits_bypassable, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:73:14] input io_in_uop_bits_mem_signed, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_fence, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_fencei, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_amo, // @[issue-slot.scala:73:14] input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:73:14] input io_in_uop_bits_uses_stq, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_unique, // @[issue-slot.scala:73:14] input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:73:14] input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:73:14] input io_in_uop_bits_ldst_val, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:73:14] input io_in_uop_bits_frs3_en, // @[issue-slot.scala:73:14] input io_in_uop_bits_fp_val, // @[issue-slot.scala:73:14] input io_in_uop_bits_fp_single, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_uopc, // @[issue-slot.scala:73:14] output [31:0] io_out_uop_inst, // @[issue-slot.scala:73:14] output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:73:14] output io_out_uop_is_rvc, // @[issue-slot.scala:73:14] output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_iq_type, // @[issue-slot.scala:73:14] output [9:0] io_out_uop_fu_code, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_ctrl_br_type, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_load, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_std, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_iw_state, // @[issue-slot.scala:73:14] output io_out_uop_iw_p1_poisoned, // 
@[issue-slot.scala:73:14] output io_out_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] output io_out_uop_is_br, // @[issue-slot.scala:73:14] output io_out_uop_is_jalr, // @[issue-slot.scala:73:14] output io_out_uop_is_jal, // @[issue-slot.scala:73:14] output io_out_uop_is_sfb, // @[issue-slot.scala:73:14] output [15:0] io_out_uop_br_mask, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:73:14] output io_out_uop_edge_inst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:73:14] output io_out_uop_taken, // @[issue-slot.scala:73:14] output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:73:14] output [11:0] io_out_uop_csr_addr, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_rob_idx, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ldq_idx, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_stq_idx, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_pdst, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs1, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs2, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs3, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ppred, // @[issue-slot.scala:73:14] output io_out_uop_prs1_busy, // @[issue-slot.scala:73:14] output io_out_uop_prs2_busy, // @[issue-slot.scala:73:14] output io_out_uop_prs3_busy, // @[issue-slot.scala:73:14] output io_out_uop_ppred_busy, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:73:14] output io_out_uop_exception, // @[issue-slot.scala:73:14] output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:73:14] output io_out_uop_bypassable, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:73:14] output io_out_uop_mem_signed, // @[issue-slot.scala:73:14] output io_out_uop_is_fence, // @[issue-slot.scala:73:14] output io_out_uop_is_fencei, // @[issue-slot.scala:73:14] output io_out_uop_is_amo, // @[issue-slot.scala:73:14] output io_out_uop_uses_ldq, // @[issue-slot.scala:73:14] output io_out_uop_uses_stq, // @[issue-slot.scala:73:14] output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] output io_out_uop_is_unique, // @[issue-slot.scala:73:14] output io_out_uop_flush_on_commit, // @[issue-slot.scala:73:14] output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_ldst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:73:14] output io_out_uop_ldst_val, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:73:14] output io_out_uop_frs3_en, // @[issue-slot.scala:73:14] output io_out_uop_fp_val, // @[issue-slot.scala:73:14] output io_out_uop_fp_single, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] output io_out_uop_bp_debug_if, // @[issue-slot.scala:73:14] output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:73:14] 
output [1:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:73:14] output [6:0] io_uop_uopc, // @[issue-slot.scala:73:14] output [31:0] io_uop_inst, // @[issue-slot.scala:73:14] output [31:0] io_uop_debug_inst, // @[issue-slot.scala:73:14] output io_uop_is_rvc, // @[issue-slot.scala:73:14] output [39:0] io_uop_debug_pc, // @[issue-slot.scala:73:14] output [2:0] io_uop_iq_type, // @[issue-slot.scala:73:14] output [9:0] io_uop_fu_code, // @[issue-slot.scala:73:14] output [3:0] io_uop_ctrl_br_type, // @[issue-slot.scala:73:14] output [1:0] io_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] output [4:0] io_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] output io_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_load, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_std, // @[issue-slot.scala:73:14] output [1:0] io_uop_iw_state, // @[issue-slot.scala:73:14] output io_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14] output io_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] output io_uop_is_br, // @[issue-slot.scala:73:14] output io_uop_is_jalr, // @[issue-slot.scala:73:14] output io_uop_is_jal, // @[issue-slot.scala:73:14] output io_uop_is_sfb, // @[issue-slot.scala:73:14] output [15:0] io_uop_br_mask, // @[issue-slot.scala:73:14] output [3:0] io_uop_br_tag, // @[issue-slot.scala:73:14] output [4:0] io_uop_ftq_idx, // @[issue-slot.scala:73:14] output io_uop_edge_inst, // @[issue-slot.scala:73:14] output [5:0] io_uop_pc_lob, // @[issue-slot.scala:73:14] output io_uop_taken, // @[issue-slot.scala:73:14] output [19:0] io_uop_imm_packed, // @[issue-slot.scala:73:14] output [11:0] io_uop_csr_addr, // @[issue-slot.scala:73:14] output [6:0] io_uop_rob_idx, // @[issue-slot.scala:73:14] output [4:0] io_uop_ldq_idx, // @[issue-slot.scala:73:14] output [4:0] io_uop_stq_idx, // @[issue-slot.scala:73:14] output [1:0] io_uop_rxq_idx, // @[issue-slot.scala:73:14] output [6:0] io_uop_pdst, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs1, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs2, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs3, // @[issue-slot.scala:73:14] output [4:0] io_uop_ppred, // @[issue-slot.scala:73:14] output io_uop_prs1_busy, // @[issue-slot.scala:73:14] output io_uop_prs2_busy, // @[issue-slot.scala:73:14] output io_uop_prs3_busy, // @[issue-slot.scala:73:14] output io_uop_ppred_busy, // @[issue-slot.scala:73:14] output [6:0] io_uop_stale_pdst, // @[issue-slot.scala:73:14] output io_uop_exception, // @[issue-slot.scala:73:14] output [63:0] io_uop_exc_cause, // @[issue-slot.scala:73:14] output io_uop_bypassable, // @[issue-slot.scala:73:14] output [4:0] io_uop_mem_cmd, // @[issue-slot.scala:73:14] output [1:0] io_uop_mem_size, // @[issue-slot.scala:73:14] output io_uop_mem_signed, // @[issue-slot.scala:73:14] output io_uop_is_fence, // @[issue-slot.scala:73:14] output io_uop_is_fencei, // @[issue-slot.scala:73:14] output io_uop_is_amo, // @[issue-slot.scala:73:14] output io_uop_uses_ldq, // @[issue-slot.scala:73:14] output io_uop_uses_stq, // @[issue-slot.scala:73:14] output io_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] output io_uop_is_unique, // @[issue-slot.scala:73:14] output io_uop_flush_on_commit, // @[issue-slot.scala:73:14] output io_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_ldst, // 
@[issue-slot.scala:73:14] output [5:0] io_uop_lrs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs2, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs3, // @[issue-slot.scala:73:14] output io_uop_ldst_val, // @[issue-slot.scala:73:14] output [1:0] io_uop_dst_rtype, // @[issue-slot.scala:73:14] output [1:0] io_uop_lrs1_rtype, // @[issue-slot.scala:73:14] output [1:0] io_uop_lrs2_rtype, // @[issue-slot.scala:73:14] output io_uop_frs3_en, // @[issue-slot.scala:73:14] output io_uop_fp_val, // @[issue-slot.scala:73:14] output io_uop_fp_single, // @[issue-slot.scala:73:14] output io_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] output io_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] output io_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] output io_uop_bp_debug_if, // @[issue-slot.scala:73:14] output io_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] output [1:0] io_uop_debug_fsrc, // @[issue-slot.scala:73:14] output [1:0] io_uop_debug_tsrc, // @[issue-slot.scala:73:14] output io_debug_p1, // @[issue-slot.scala:73:14] output io_debug_p2, // @[issue-slot.scala:73:14] output io_debug_p3, // @[issue-slot.scala:73:14] output io_debug_ppred, // @[issue-slot.scala:73:14] output [1:0] io_debug_state // @[issue-slot.scala:73:14] ); wire io_grant_0 = io_grant; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[issue-slot.scala:69:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:69:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:69:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[issue-slot.scala:69:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[issue-slot.scala:69:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7] wire 
io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:69:7] wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:69:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:69:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:69:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:69:7] wire 
io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:69:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[issue-slot.scala:69:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:69:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:69:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:69:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:69:7] wire io_kill_0 = io_kill; // @[issue-slot.scala:69:7] wire io_clear_0 = io_clear; // @[issue-slot.scala:69:7] wire io_ldspec_miss_0 = io_ldspec_miss; // @[issue-slot.scala:69:7] wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_0_bits_pdst_0 = io_wakeup_ports_0_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_0_bits_poisoned_0 = io_wakeup_ports_0_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_1_bits_pdst_0 = io_wakeup_ports_1_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_1_bits_poisoned_0 = io_wakeup_ports_1_bits_poisoned; // @[issue-slot.scala:69:7] wire 
io_wakeup_ports_2_valid_0 = io_wakeup_ports_2_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_2_bits_pdst_0 = io_wakeup_ports_2_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_2_bits_poisoned_0 = io_wakeup_ports_2_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_3_valid_0 = io_wakeup_ports_3_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_3_bits_pdst_0 = io_wakeup_ports_3_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_3_bits_poisoned_0 = io_wakeup_ports_3_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_4_valid_0 = io_wakeup_ports_4_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_4_bits_pdst_0 = io_wakeup_ports_4_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_4_bits_poisoned_0 = io_wakeup_ports_4_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_5_valid_0 = io_wakeup_ports_5_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_5_bits_pdst_0 = io_wakeup_ports_5_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_5_bits_poisoned_0 = io_wakeup_ports_5_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_6_valid_0 = io_wakeup_ports_6_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_6_bits_pdst_0 = io_wakeup_ports_6_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_6_bits_poisoned_0 = io_wakeup_ports_6_bits_poisoned; // @[issue-slot.scala:69:7] wire io_spec_ld_wakeup_0_valid_0 = io_spec_ld_wakeup_0_valid; // @[issue-slot.scala:69:7] wire [6:0] io_spec_ld_wakeup_0_bits_0 = io_spec_ld_wakeup_0_bits; // @[issue-slot.scala:69:7] wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_uopc_0 = io_in_uop_bits_uopc; // @[issue-slot.scala:69:7] wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:69:7] wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:69:7] wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_iq_type_0 = io_in_uop_bits_iq_type; // @[issue-slot.scala:69:7] wire [9:0] io_in_uop_bits_fu_code_0 = io_in_uop_bits_fu_code; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_ctrl_br_type_0 = io_in_uop_bits_ctrl_br_type; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_ctrl_op1_sel_0 = io_in_uop_bits_ctrl_op1_sel; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_op2_sel_0 = io_in_uop_bits_ctrl_op2_sel; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_imm_sel_0 = io_in_uop_bits_ctrl_imm_sel; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ctrl_op_fcn_0 = io_in_uop_bits_ctrl_op_fcn; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_fcn_dw_0 = io_in_uop_bits_ctrl_fcn_dw; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_csr_cmd_0 = io_in_uop_bits_ctrl_csr_cmd; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_load_0 = io_in_uop_bits_ctrl_is_load; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_sta_0 = io_in_uop_bits_ctrl_is_sta; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_std_0 = io_in_uop_bits_ctrl_is_std; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_iw_state_0 = io_in_uop_bits_iw_state; // @[issue-slot.scala:69:7] wire io_in_uop_bits_iw_p1_poisoned_0 = io_in_uop_bits_iw_p1_poisoned; // @[issue-slot.scala:69:7] wire io_in_uop_bits_iw_p2_poisoned_0 = io_in_uop_bits_iw_p2_poisoned; // 
@[issue-slot.scala:69:7] wire io_in_uop_bits_is_br_0 = io_in_uop_bits_is_br; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_jalr_0 = io_in_uop_bits_is_jalr; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_jal_0 = io_in_uop_bits_is_jal; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:69:7] wire [15:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:69:7] wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:69:7] wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:69:7] wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:69:7] wire [11:0] io_in_uop_bits_csr_addr_0 = io_in_uop_bits_csr_addr; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:69:7] wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:69:7] wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bypassable_0 = io_in_uop_bits_bypassable; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:69:7] wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:69:7] wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:69:7] wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:69:7] wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // 
@[issue-slot.scala:69:7] wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ldst_val_0 = io_in_uop_bits_ldst_val; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:69:7] wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:69:7] wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:69:7] wire io_in_uop_bits_fp_single_0 = io_in_uop_bits_fp_single; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:69:7] wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:69:7] wire slot_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_br = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_taken = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_exception = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19] wire 
slot_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_load = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_std = 1'h0; // @[consts.scala:279:18] wire [4:0] io_pred_wakeup_port_bits = 5'h0; // @[issue-slot.scala:69:7] wire [4:0] slot_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ftq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ldq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_stq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ppred = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18] wire [1:0] slot_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18] wire [3:0] slot_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_uop_br_tag = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_cs_br_type = 4'h0; // @[consts.scala:279:18] wire [1:0] slot_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_ldst = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19] wire [63:0] slot_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_uopc = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_rob_idx = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_pdst = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs1 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs2 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs3 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_stale_pdst = 7'h0; // 
@[consts.scala:269:19] wire [11:0] slot_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19] wire [19:0] slot_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19] wire [15:0] slot_uop_uop_br_mask = 16'h0; // @[consts.scala:269:19] wire [9:0] slot_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19] wire [39:0] slot_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19] wire [31:0] slot_uop_uop_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] slot_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19] wire _io_valid_T; // @[issue-slot.scala:79:24] wire _io_will_be_valid_T_4; // @[issue-slot.scala:262:32] wire _io_request_hp_T; // @[issue-slot.scala:243:31] wire [6:0] next_uopc; // @[issue-slot.scala:82:29] wire [1:0] next_state; // @[issue-slot.scala:81:29] wire [15:0] next_br_mask; // @[util.scala:85:25] wire _io_out_uop_prs1_busy_T; // @[issue-slot.scala:270:28] wire _io_out_uop_prs2_busy_T; // @[issue-slot.scala:271:28] wire _io_out_uop_prs3_busy_T; // @[issue-slot.scala:272:28] wire _io_out_uop_ppred_busy_T; // @[issue-slot.scala:273:28] wire [1:0] next_lrs1_rtype; // @[issue-slot.scala:83:29] wire [1:0] next_lrs2_rtype; // @[issue-slot.scala:84:29] wire [3:0] io_out_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_uopc_0; // @[issue-slot.scala:69:7] wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:69:7] wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_rvc_0; // @[issue-slot.scala:69:7] wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_iq_type_0; // @[issue-slot.scala:69:7] wire [9:0] io_out_uop_fu_code_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_iw_state_0; // @[issue-slot.scala:69:7] wire io_out_uop_iw_p1_poisoned_0; // @[issue-slot.scala:69:7] wire io_out_uop_iw_p2_poisoned_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_br_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_jalr_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_jal_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_sfb_0; // @[issue-slot.scala:69:7] wire [15:0] io_out_uop_br_mask_0; // @[issue-slot.scala:69:7] wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:69:7] wire io_out_uop_edge_inst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:69:7] wire io_out_uop_taken_0; // @[issue-slot.scala:69:7] wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:69:7] wire [11:0] io_out_uop_csr_addr_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs3_0; // 
@[issue-slot.scala:69:7] wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:69:7] wire io_out_uop_exception_0; // @[issue-slot.scala:69:7] wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:69:7] wire io_out_uop_bypassable_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:69:7] wire io_out_uop_mem_signed_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_fence_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_fencei_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_amo_0; // @[issue-slot.scala:69:7] wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:69:7] wire io_out_uop_uses_stq_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_unique_0; // @[issue-slot.scala:69:7] wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:69:7] wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:69:7] wire io_out_uop_ldst_val_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7] wire io_out_uop_frs3_en_0; // @[issue-slot.scala:69:7] wire io_out_uop_fp_val_0; // @[issue-slot.scala:69:7] wire io_out_uop_fp_single_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_uopc_0; // @[issue-slot.scala:69:7] wire [31:0] io_uop_inst_0; // @[issue-slot.scala:69:7] wire [31:0] io_uop_debug_inst_0; // @[issue-slot.scala:69:7] wire io_uop_is_rvc_0; // @[issue-slot.scala:69:7] wire [39:0] io_uop_debug_pc_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_iq_type_0; // @[issue-slot.scala:69:7] wire [9:0] io_uop_fu_code_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_iw_state_0; // @[issue-slot.scala:69:7] wire io_uop_iw_p1_poisoned_0; // @[issue-slot.scala:69:7] wire io_uop_iw_p2_poisoned_0; // @[issue-slot.scala:69:7] wire io_uop_is_br_0; // @[issue-slot.scala:69:7] wire io_uop_is_jalr_0; // 
@[issue-slot.scala:69:7] wire io_uop_is_jal_0; // @[issue-slot.scala:69:7] wire io_uop_is_sfb_0; // @[issue-slot.scala:69:7] wire [15:0] io_uop_br_mask_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_br_tag_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ftq_idx_0; // @[issue-slot.scala:69:7] wire io_uop_edge_inst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_pc_lob_0; // @[issue-slot.scala:69:7] wire io_uop_taken_0; // @[issue-slot.scala:69:7] wire [19:0] io_uop_imm_packed_0; // @[issue-slot.scala:69:7] wire [11:0] io_uop_csr_addr_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_rob_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ldq_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_stq_idx_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_rxq_idx_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_pdst_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs1_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs2_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs3_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ppred_0; // @[issue-slot.scala:69:7] wire io_uop_prs1_busy_0; // @[issue-slot.scala:69:7] wire io_uop_prs2_busy_0; // @[issue-slot.scala:69:7] wire io_uop_prs3_busy_0; // @[issue-slot.scala:69:7] wire io_uop_ppred_busy_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_stale_pdst_0; // @[issue-slot.scala:69:7] wire io_uop_exception_0; // @[issue-slot.scala:69:7] wire [63:0] io_uop_exc_cause_0; // @[issue-slot.scala:69:7] wire io_uop_bypassable_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_mem_cmd_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_mem_size_0; // @[issue-slot.scala:69:7] wire io_uop_mem_signed_0; // @[issue-slot.scala:69:7] wire io_uop_is_fence_0; // @[issue-slot.scala:69:7] wire io_uop_is_fencei_0; // @[issue-slot.scala:69:7] wire io_uop_is_amo_0; // @[issue-slot.scala:69:7] wire io_uop_uses_ldq_0; // @[issue-slot.scala:69:7] wire io_uop_uses_stq_0; // @[issue-slot.scala:69:7] wire io_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7] wire io_uop_is_unique_0; // @[issue-slot.scala:69:7] wire io_uop_flush_on_commit_0; // @[issue-slot.scala:69:7] wire io_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_ldst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs3_0; // @[issue-slot.scala:69:7] wire io_uop_ldst_val_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_dst_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7] wire io_uop_frs3_en_0; // @[issue-slot.scala:69:7] wire io_uop_fp_val_0; // @[issue-slot.scala:69:7] wire io_uop_fp_single_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7] wire io_uop_bp_debug_if_0; // @[issue-slot.scala:69:7] wire io_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_debug_fsrc_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_debug_tsrc_0; // @[issue-slot.scala:69:7] wire io_debug_p1_0; // @[issue-slot.scala:69:7] wire io_debug_p2_0; // @[issue-slot.scala:69:7] wire io_debug_p3_0; // @[issue-slot.scala:69:7] wire io_debug_ppred_0; // @[issue-slot.scala:69:7] wire [1:0] io_debug_state_0; // @[issue-slot.scala:69:7] wire io_valid_0; // @[issue-slot.scala:69:7] wire io_will_be_valid_0; // @[issue-slot.scala:69:7] wire io_request_0; // @[issue-slot.scala:69:7] 
wire io_request_hp_0; // @[issue-slot.scala:69:7] assign io_out_uop_iw_state_0 = next_state; // @[issue-slot.scala:69:7, :81:29] assign io_out_uop_uopc_0 = next_uopc; // @[issue-slot.scala:69:7, :82:29] assign io_out_uop_lrs1_rtype_0 = next_lrs1_rtype; // @[issue-slot.scala:69:7, :83:29] assign io_out_uop_lrs2_rtype_0 = next_lrs2_rtype; // @[issue-slot.scala:69:7, :84:29] reg [1:0] state; // @[issue-slot.scala:86:22] assign io_debug_state_0 = state; // @[issue-slot.scala:69:7, :86:22] reg p1; // @[issue-slot.scala:87:22] assign io_debug_p1_0 = p1; // @[issue-slot.scala:69:7, :87:22] wire next_p1 = p1; // @[issue-slot.scala:87:22, :163:25] reg p2; // @[issue-slot.scala:88:22] assign io_debug_p2_0 = p2; // @[issue-slot.scala:69:7, :88:22] wire next_p2 = p2; // @[issue-slot.scala:88:22, :164:25] reg p3; // @[issue-slot.scala:89:22] assign io_debug_p3_0 = p3; // @[issue-slot.scala:69:7, :89:22] wire next_p3 = p3; // @[issue-slot.scala:89:22, :165:25] reg ppred; // @[issue-slot.scala:90:22] assign io_debug_ppred_0 = ppred; // @[issue-slot.scala:69:7, :90:22] wire next_ppred = ppred; // @[issue-slot.scala:90:22, :166:28] reg p1_poisoned; // @[issue-slot.scala:95:28] assign io_out_uop_iw_p1_poisoned_0 = p1_poisoned; // @[issue-slot.scala:69:7, :95:28] assign io_uop_iw_p1_poisoned_0 = p1_poisoned; // @[issue-slot.scala:69:7, :95:28] reg p2_poisoned; // @[issue-slot.scala:96:28] assign io_out_uop_iw_p2_poisoned_0 = p2_poisoned; // @[issue-slot.scala:69:7, :96:28] assign io_uop_iw_p2_poisoned_0 = p2_poisoned; // @[issue-slot.scala:69:7, :96:28] wire next_p1_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p1_poisoned_0 : p1_poisoned; // @[issue-slot.scala:69:7, :95:28, :99:29] wire next_p2_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p2_poisoned_0 : p2_poisoned; // @[issue-slot.scala:69:7, :96:28, :100:29] reg [6:0] slot_uop_uopc; // @[issue-slot.scala:102:25] reg [31:0] slot_uop_inst; // @[issue-slot.scala:102:25] assign io_out_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25] reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:102:25] assign io_out_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_rvc; // @[issue-slot.scala:102:25] assign io_out_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25] reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_iq_type; // @[issue-slot.scala:102:25] assign io_out_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25] assign io_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25] reg [9:0] slot_uop_fu_code; // @[issue-slot.scala:102:25] assign io_out_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_ctrl_br_type; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_ctrl_op1_sel; // @[issue-slot.scala:102:25] assign 
io_out_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_op2_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_imm_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ctrl_op_fcn; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_load; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_sta; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_std; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_iw_state; // @[issue-slot.scala:102:25] assign io_uop_iw_state_0 = slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_iw_p1_poisoned; // @[issue-slot.scala:102:25] reg slot_uop_iw_p2_poisoned; // @[issue-slot.scala:102:25] reg slot_uop_is_br; // @[issue-slot.scala:102:25] assign io_out_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_jalr; // @[issue-slot.scala:102:25] assign io_out_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_jal; // @[issue-slot.scala:102:25] assign io_out_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_sfb; // @[issue-slot.scala:102:25] assign io_out_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25] reg [15:0] slot_uop_br_mask; // @[issue-slot.scala:102:25] assign io_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:102:25] assign io_out_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25] assign io_uop_br_tag_0 = 
  slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25]
  reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:102:25]
  assign io_out_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_edge_inst; // @[issue-slot.scala:102:25]
  assign io_out_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25]
  reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:102:25]
  assign io_out_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_taken; // @[issue-slot.scala:102:25]
  assign io_out_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25]
  reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:102:25]
  assign io_out_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25]
  reg [11:0] slot_uop_csr_addr; // @[issue-slot.scala:102:25]
  assign io_out_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25]
  reg [6:0] slot_uop_rob_idx; // @[issue-slot.scala:102:25]
  assign io_out_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25]
  reg [4:0] slot_uop_ldq_idx; // @[issue-slot.scala:102:25]
  assign io_out_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25]
  reg [4:0] slot_uop_stq_idx; // @[issue-slot.scala:102:25]
  assign io_out_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25]
  reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:102:25]
  assign io_out_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25]
  reg [6:0] slot_uop_pdst; // @[issue-slot.scala:102:25]
  assign io_out_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25]
  reg [6:0] slot_uop_prs1; // @[issue-slot.scala:102:25]
  assign io_out_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25]
  reg [6:0] slot_uop_prs2; // @[issue-slot.scala:102:25]
  assign io_out_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25]
  reg [6:0] slot_uop_prs3; // @[issue-slot.scala:102:25]
  assign io_out_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25]
  reg [4:0] slot_uop_ppred; // @[issue-slot.scala:102:25]
  assign io_out_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_prs1_busy; // @[issue-slot.scala:102:25]
  assign io_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_prs2_busy; // @[issue-slot.scala:102:25]
  assign io_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_prs3_busy; // @[issue-slot.scala:102:25]
  assign io_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_ppred_busy; // @[issue-slot.scala:102:25]
  assign io_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25]
  reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:102:25]
  assign io_out_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_exception; // @[issue-slot.scala:102:25]
  assign io_out_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25]
  reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:102:25]
  assign io_out_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_bypassable; // @[issue-slot.scala:102:25]
  assign io_out_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25]
  reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:102:25]
  assign io_out_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25]
  reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:102:25]
  assign io_out_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_mem_signed; // @[issue-slot.scala:102:25]
  assign io_out_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_is_fence; // @[issue-slot.scala:102:25]
  assign io_out_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_is_fencei; // @[issue-slot.scala:102:25]
  assign io_out_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_is_amo; // @[issue-slot.scala:102:25]
  assign io_out_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_uses_ldq; // @[issue-slot.scala:102:25]
  assign io_out_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_uses_stq; // @[issue-slot.scala:102:25]
  assign io_out_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:102:25]
  assign io_out_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_is_unique; // @[issue-slot.scala:102:25]
  assign io_out_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_flush_on_commit; // @[issue-slot.scala:102:25]
  assign io_out_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:102:25]
  assign io_out_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25]
  reg [5:0] slot_uop_ldst; // @[issue-slot.scala:102:25]
  assign io_out_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25]
  reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:102:25]
  assign io_out_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25]
  reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:102:25]
  assign io_out_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25]
  reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:102:25]
  assign io_out_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_ldst_val; // @[issue-slot.scala:102:25]
  assign io_out_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25]
  reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:102:25]
  assign io_out_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25]
  reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:102:25]
  reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:102:25]
  reg slot_uop_frs3_en; // @[issue-slot.scala:102:25]
  assign io_out_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_fp_val; // @[issue-slot.scala:102:25]
  assign io_out_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_fp_single; // @[issue-slot.scala:102:25]
  assign io_out_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:102:25]
  assign io_out_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:102:25]
  assign io_out_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:102:25]
  assign io_out_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_bp_debug_if; // @[issue-slot.scala:102:25]
  assign io_out_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25]
  reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:102:25]
  assign io_out_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25]
  reg [1:0] slot_uop_debug_fsrc; // @[issue-slot.scala:102:25]
  assign io_out_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25]
  reg [1:0] slot_uop_debug_tsrc; // @[issue-slot.scala:102:25]
  assign io_out_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25]
  assign io_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25]
  wire [6:0] next_uop_uopc = io_in_uop_valid_0 ? io_in_uop_bits_uopc_0 : slot_uop_uopc; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [31:0] next_uop_inst = io_in_uop_valid_0 ? io_in_uop_bits_inst_0 : slot_uop_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [31:0] next_uop_debug_inst = io_in_uop_valid_0 ? io_in_uop_bits_debug_inst_0 : slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_rvc = io_in_uop_valid_0 ? io_in_uop_bits_is_rvc_0 : slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [39:0] next_uop_debug_pc = io_in_uop_valid_0 ? io_in_uop_bits_debug_pc_0 : slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [2:0] next_uop_iq_type = io_in_uop_valid_0 ? io_in_uop_bits_iq_type_0 : slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [9:0] next_uop_fu_code = io_in_uop_valid_0 ? io_in_uop_bits_fu_code_0 : slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [3:0] next_uop_ctrl_br_type = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_br_type_0 : slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_ctrl_op1_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op1_sel_0 : slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [2:0] next_uop_ctrl_op2_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op2_sel_0 : slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [2:0] next_uop_ctrl_imm_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_imm_sel_0 : slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [4:0] next_uop_ctrl_op_fcn = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op_fcn_0 : slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_ctrl_fcn_dw = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_fcn_dw_0 : slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [2:0] next_uop_ctrl_csr_cmd = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_csr_cmd_0 : slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_ctrl_is_load = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_load_0 : slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_ctrl_is_sta = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_sta_0 : slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_ctrl_is_std = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_std_0 : slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_iw_state = io_in_uop_valid_0 ? io_in_uop_bits_iw_state_0 : slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_iw_p1_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p1_poisoned_0 : slot_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_iw_p2_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p2_poisoned_0 : slot_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_br = io_in_uop_valid_0 ? io_in_uop_bits_is_br_0 : slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_jalr = io_in_uop_valid_0 ? io_in_uop_bits_is_jalr_0 : slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_jal = io_in_uop_valid_0 ? io_in_uop_bits_is_jal_0 : slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_sfb = io_in_uop_valid_0 ? io_in_uop_bits_is_sfb_0 : slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [15:0] next_uop_br_mask = io_in_uop_valid_0 ? io_in_uop_bits_br_mask_0 : slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [3:0] next_uop_br_tag = io_in_uop_valid_0 ? io_in_uop_bits_br_tag_0 : slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [4:0] next_uop_ftq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ftq_idx_0 : slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_edge_inst = io_in_uop_valid_0 ? io_in_uop_bits_edge_inst_0 : slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [5:0] next_uop_pc_lob = io_in_uop_valid_0 ? io_in_uop_bits_pc_lob_0 : slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_taken = io_in_uop_valid_0 ? io_in_uop_bits_taken_0 : slot_uop_taken; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [19:0] next_uop_imm_packed = io_in_uop_valid_0 ? io_in_uop_bits_imm_packed_0 : slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [11:0] next_uop_csr_addr = io_in_uop_valid_0 ? io_in_uop_bits_csr_addr_0 : slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [6:0] next_uop_rob_idx = io_in_uop_valid_0 ? io_in_uop_bits_rob_idx_0 : slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [4:0] next_uop_ldq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ldq_idx_0 : slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [4:0] next_uop_stq_idx = io_in_uop_valid_0 ? io_in_uop_bits_stq_idx_0 : slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_rxq_idx = io_in_uop_valid_0 ? io_in_uop_bits_rxq_idx_0 : slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [6:0] next_uop_pdst = io_in_uop_valid_0 ? io_in_uop_bits_pdst_0 : slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [6:0] next_uop_prs1 = io_in_uop_valid_0 ? io_in_uop_bits_prs1_0 : slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [6:0] next_uop_prs2 = io_in_uop_valid_0 ? io_in_uop_bits_prs2_0 : slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [6:0] next_uop_prs3 = io_in_uop_valid_0 ? io_in_uop_bits_prs3_0 : slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [4:0] next_uop_ppred = io_in_uop_valid_0 ? io_in_uop_bits_ppred_0 : slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_prs1_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs1_busy_0 : slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_prs2_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs2_busy_0 : slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_prs3_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs3_busy_0 : slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_ppred_busy = io_in_uop_valid_0 ? io_in_uop_bits_ppred_busy_0 : slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [6:0] next_uop_stale_pdst = io_in_uop_valid_0 ? io_in_uop_bits_stale_pdst_0 : slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_exception = io_in_uop_valid_0 ? io_in_uop_bits_exception_0 : slot_uop_exception; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [63:0] next_uop_exc_cause = io_in_uop_valid_0 ? io_in_uop_bits_exc_cause_0 : slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_bypassable = io_in_uop_valid_0 ? io_in_uop_bits_bypassable_0 : slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [4:0] next_uop_mem_cmd = io_in_uop_valid_0 ? io_in_uop_bits_mem_cmd_0 : slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_mem_size = io_in_uop_valid_0 ? io_in_uop_bits_mem_size_0 : slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_mem_signed = io_in_uop_valid_0 ? io_in_uop_bits_mem_signed_0 : slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_fence = io_in_uop_valid_0 ? io_in_uop_bits_is_fence_0 : slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_fencei = io_in_uop_valid_0 ? io_in_uop_bits_is_fencei_0 : slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_amo = io_in_uop_valid_0 ? io_in_uop_bits_is_amo_0 : slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_uses_ldq = io_in_uop_valid_0 ? io_in_uop_bits_uses_ldq_0 : slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_uses_stq = io_in_uop_valid_0 ? io_in_uop_bits_uses_stq_0 : slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_sys_pc2epc = io_in_uop_valid_0 ? io_in_uop_bits_is_sys_pc2epc_0 : slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_is_unique = io_in_uop_valid_0 ? io_in_uop_bits_is_unique_0 : slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_flush_on_commit = io_in_uop_valid_0 ? io_in_uop_bits_flush_on_commit_0 : slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_ldst_is_rs1 = io_in_uop_valid_0 ? io_in_uop_bits_ldst_is_rs1_0 : slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [5:0] next_uop_ldst = io_in_uop_valid_0 ? io_in_uop_bits_ldst_0 : slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [5:0] next_uop_lrs1 = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_0 : slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [5:0] next_uop_lrs2 = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_0 : slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [5:0] next_uop_lrs3 = io_in_uop_valid_0 ? io_in_uop_bits_lrs3_0 : slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_ldst_val = io_in_uop_valid_0 ? io_in_uop_bits_ldst_val_0 : slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_dst_rtype = io_in_uop_valid_0 ? io_in_uop_bits_dst_rtype_0 : slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_lrs1_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_rtype_0 : slot_uop_lrs1_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_lrs2_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_rtype_0 : slot_uop_lrs2_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_frs3_en = io_in_uop_valid_0 ? io_in_uop_bits_frs3_en_0 : slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_fp_val = io_in_uop_valid_0 ? io_in_uop_bits_fp_val_0 : slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_fp_single = io_in_uop_valid_0 ? io_in_uop_bits_fp_single_0 : slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_xcpt_pf_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_pf_if_0 : slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_xcpt_ae_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ae_if_0 : slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_xcpt_ma_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ma_if_0 : slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_bp_debug_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_debug_if_0 : slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire next_uop_bp_xcpt_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_xcpt_if_0 : slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_debug_fsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_fsrc_0 : slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire [1:0] next_uop_debug_tsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_tsrc_0 : slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25, :103:21]
  wire _T_11 = state == 2'h2; // @[issue-slot.scala:86:22, :134:25]
  wire _T_7 = io_grant_0 & state == 2'h1 | io_grant_0 & _T_11 & p1 & p2 & ppred; // @[issue-slot.scala:69:7, :86:22, :87:22, :88:22, :90:22, :133:{26,36,52}, :134:{15,25,40,46,52}]
  wire _T_12 = io_grant_0 & _T_11; // @[issue-slot.scala:69:7, :134:25, :139:25]
  wire _T_14 = io_ldspec_miss_0 & (p1_poisoned | p2_poisoned); // @[issue-slot.scala:69:7, :95:28, :96:28, :140:{28,44}]
  wire _GEN = _T_12 & ~_T_14; // @[issue-slot.scala:126:14, :139:{25,51}, :140:{11,28,62}, :141:18]
  wire _GEN_0 = io_kill_0 | _T_7; // @[issue-slot.scala:69:7, :102:25, :131:18, :133:52, :134:63, :139:51]
  wire _GEN_1 = _GEN_0 | ~(_T_12 & ~_T_14 & p1); // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:{11,28,62}, :142:17, :143:23]
  assign next_uopc = _GEN_1 ? slot_uop_uopc : 7'h3; // @[issue-slot.scala:82:29, :102:25, :131:18, :134:63, :139:51]
  assign next_lrs1_rtype = _GEN_1 ? slot_uop_lrs1_rtype : 2'h2; // @[issue-slot.scala:83:29, :102:25, :131:18, :134:63, :139:51]
  wire _GEN_2 = _GEN_0 | ~_GEN | p1; // @[issue-slot.scala:87:22, :102:25, :126:14, :131:18, :134:63, :139:51, :140:62, :141:18, :142:17]
  assign next_lrs2_rtype = _GEN_2 ? slot_uop_lrs2_rtype : 2'h2; // @[issue-slot.scala:84:29, :102:25, :131:18, :134:63, :139:51, :140:62, :142:17]
  wire _p1_T = ~io_in_uop_bits_prs1_busy_0; // @[issue-slot.scala:69:7, :169:11]
  wire _p2_T = ~io_in_uop_bits_prs2_busy_0; // @[issue-slot.scala:69:7, :170:11]
  wire _p3_T = ~io_in_uop_bits_prs3_busy_0; // @[issue-slot.scala:69:7, :171:11]
  wire _ppred_T = ~io_in_uop_bits_ppred_busy_0; // @[issue-slot.scala:69:7, :172:14]
  wire _T_22 = io_ldspec_miss_0 & next_p1_poisoned; // @[issue-slot.scala:69:7, :99:29, :175:24]
  wire _T_27 = io_ldspec_miss_0 & next_p2_poisoned; // @[issue-slot.scala:69:7, :100:29, :179:24]
  wire _T_85 = io_spec_ld_wakeup_0_valid_0 & io_spec_ld_wakeup_0_bits_0 == next_uop_prs1 & next_uop_lrs1_rtype == 2'h0; // @[issue-slot.scala:69:7, :103:21, :209:38, :210:{33,51}, :211:27]
  wire _T_93 = io_spec_ld_wakeup_0_valid_0 & io_spec_ld_wakeup_0_bits_0 == next_uop_prs2 & next_uop_lrs2_rtype == 2'h0; // @[issue-slot.scala:69:7, :103:21, :216:38, :217:{33,51}, :218:27]
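The long run of next_uop_* muxes above is the field-by-field expansion of a single bundle-level select in the Chisel issue-slot source: when an incoming micro-op is valid the slot captures it, otherwise it keeps the currently held slot_uop. A minimal Chisel sketch of that idiom, assuming io.in_uop is a Valid[MicroOp] port and slot_uop is the slot's state register (names inferred from the generated signal names, not taken from the actual issue-slot.scala):

  // Illustrative sketch only: one bundle-level mux; lowering to FIRRTL/Verilog
  // expands it into the individual next_uop_<field> wires seen above.
  val slot_uop = Reg(new MicroOp)
  val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop)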
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. 
* Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode 
= UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new 
TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File TLSerdes.scala: package testchipip.serdes import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config._ import freechips.rocketchip.util._ import freechips.rocketchip.tilelink._ object TLSerdesser { // This should be the standard bundle type for TLSerdesser val STANDARD_TLBUNDLE_PARAMS = TLBundleParameters( addressBits=64, dataBits=64, sourceBits=8, sinkBits=8, sizeBits=8, echoFields=Nil, requestFields=Nil, responseFields=Nil, hasBCE=true) } class SerdesDebugIO extends Bundle { val ser_busy = Output(Bool()) val des_busy = Output(Bool()) } class TLSerdesser( val flitWidth: Int, clientPortParams: Option[TLMasterPortParameters], managerPortParams: Option[TLSlavePortParameters], val bundleParams: TLBundleParameters = TLSerdesser.STANDARD_TLBUNDLE_PARAMS, nameSuffix: Option[String] = None ) (implicit p: Parameters) extends LazyModule { require (clientPortParams.isDefined || managerPortParams.isDefined) val clientNode = clientPortParams.map { c => TLClientNode(Seq(c)) } val managerNode = managerPortParams.map { m => TLManagerNode(Seq(m)) } override lazy val desiredName = (Seq("TLSerdesser") ++ nameSuffix).mkString("_") lazy val module = new Impl class Impl extends LazyModuleImp(this) { val io = IO(new Bundle { val ser = Vec(5, new DecoupledFlitIO(flitWidth)) val debug = new SerdesDebugIO }) val client_tl = clientNode.map(_.out(0)._1).getOrElse(0.U.asTypeOf(new TLBundle(bundleParams))) val client_edge = clientNode.map(_.out(0)._2) val manager_tl = managerNode.map(_.in(0)._1).getOrElse(0.U.asTypeOf(new TLBundle(bundleParams))) val manager_edge = managerNode.map(_.in(0)._2) val clientParams = client_edge.map(_.bundle).getOrElse(bundleParams) val managerParams = manager_edge.map(_.bundle).getOrElse(bundleParams) val mergedParams = clientParams.union(managerParams).union(bundleParams) require(mergedParams.echoFields.isEmpty, "TLSerdesser does not support TileLink with echo fields") require(mergedParams.requestFields.isEmpty, "TLSerdesser does not support TileLink with request fields") require(mergedParams.responseFields.isEmpty, "TLSerdesser does not support TileLink with response fields") require(mergedParams == bundleParams, s"TLSerdesser is misconfigured, the combined inwards/outwards parameters cannot be serialized using the provided bundle params\n$mergedParams > $bundleParams") val out_channels = Seq( (manager_tl.e, manager_edge.map(e => Module(new 
TLEToBeat(e, mergedParams, nameSuffix)))), (client_tl.d, client_edge.map (e => Module(new TLDToBeat(e, mergedParams, nameSuffix)))), (manager_tl.c, manager_edge.map(e => Module(new TLCToBeat(e, mergedParams, nameSuffix)))), (client_tl.b, client_edge.map (e => Module(new TLBToBeat(e, mergedParams, nameSuffix)))), (manager_tl.a, manager_edge.map(e => Module(new TLAToBeat(e, mergedParams, nameSuffix)))) ) io.ser.map(_.out.valid := false.B) io.ser.map(_.out.bits := DontCare) val out_sers = out_channels.zipWithIndex.map { case ((c,b),i) => b.map { b => b.io.protocol <> c val ser = Module(new GenericSerializer(b.io.beat.bits.cloneType, flitWidth)).suggestName(s"ser_$i") ser.io.in <> b.io.beat io.ser(i).out <> ser.io.out ser }}.flatten io.debug.ser_busy := out_sers.map(_.io.busy).orR val in_channels = Seq( (client_tl.e, Module(new TLEFromBeat(mergedParams, nameSuffix))), (manager_tl.d, Module(new TLDFromBeat(mergedParams, nameSuffix))), (client_tl.c, Module(new TLCFromBeat(mergedParams, nameSuffix))), (manager_tl.b, Module(new TLBFromBeat(mergedParams, nameSuffix))), (client_tl.a, Module(new TLAFromBeat(mergedParams, nameSuffix))) ) val in_desers = in_channels.zipWithIndex.map { case ((c,b),i) => c <> b.io.protocol val des = Module(new GenericDeserializer(b.io.beat.bits.cloneType, flitWidth)).suggestName(s"des_$i") des.io.in <> io.ser(i).in b.io.beat <> des.io.out des } io.debug.des_busy := in_desers.map(_.io.busy).orR } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. 
Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package; all nodes are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers * specified for a sink according to the protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extend from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers * specified for a source according to the protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extend from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star" * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
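The Dangle bookkeeping described above can be pictured with a small, self-contained sketch: LazyModuleImpLike.instantiate groups every Dangle by its source HalfEdge, connects each group of exactly two (one flipped, one not), and forwards the unmatched ones to the parent as AutoBundle IO. The SimpleHalfEdge/SimpleDangle types and DanglePairingSketch below are simplified stand-ins invented purely for illustration; they are not the actual diplomacy classes.

// Minimal plain-Scala sketch of the Dangle pairing step, assuming simplified stand-in types.
case class SimpleHalfEdge(serial: Int, index: Int)
case class SimpleDangle(source: SimpleHalfEdge, sink: SimpleHalfEdge, flipped: Boolean, name: String, data: String)

object DanglePairingSketch extends App {
  val dangles = Seq(
    SimpleDangle(SimpleHalfEdge(0, 0), SimpleHalfEdge(1, 0), flipped = false, name = "nodeOut", data = "a_out"),
    SimpleDangle(SimpleHalfEdge(0, 0), SimpleHalfEdge(1, 0), flipped = true,  name = "nodeIn",  data = "a_in"),
    SimpleDangle(SimpleHalfEdge(2, 0), SimpleHalfEdge(3, 0), flipped = true,  name = "otherIn", data = "b_in")
  )

  // Group Dangles by their source HalfEdge; a group of exactly two is an
  // internal connection (one flipped, one not) and is resolved here.
  val pairing = dangles.groupBy(_.source)
  val done: Set[SimpleHalfEdge] = pairing.values.collect { case Seq(a, b) =>
    require(a.flipped != b.flipped)
    println(s"connect ${a.data} <> ${b.data}")
    a.source
  }.toSet

  // Unmatched Dangles become AutoBundle IO ports forwarded to the parent.
  val forward = dangles.filterNot(d => done(d.source))
  forward.foreach(d => println(s"forward ${d.name} as IO"))
}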
module TLSerdesser_SerialRAM( // @[TLSerdes.scala:39:9] input clock, // @[TLSerdes.scala:39:9] input reset, // @[TLSerdes.scala:39:9] output auto_manager_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_manager_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_manager_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_manager_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_manager_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input auto_manager_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_manager_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_manager_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_manager_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_manager_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_manager_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_manager_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_manager_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_manager_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_manager_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output auto_manager_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [2:0] auto_manager_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_manager_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_manager_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_manager_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output io_ser_0_in_ready, // @[TLSerdes.scala:40:16] input io_ser_0_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_0_in_bits_flit, // @[TLSerdes.scala:40:16] input io_ser_0_out_ready, // @[TLSerdes.scala:40:16] output [31:0] io_ser_0_out_bits_flit, // @[TLSerdes.scala:40:16] output io_ser_1_in_ready, // @[TLSerdes.scala:40:16] input io_ser_1_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_1_in_bits_flit, // @[TLSerdes.scala:40:16] output io_ser_2_in_ready, // @[TLSerdes.scala:40:16] input io_ser_2_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_2_in_bits_flit, // @[TLSerdes.scala:40:16] input io_ser_2_out_ready, // @[TLSerdes.scala:40:16] output io_ser_2_out_valid, // @[TLSerdes.scala:40:16] output [31:0] io_ser_2_out_bits_flit, // @[TLSerdes.scala:40:16] output io_ser_3_in_ready, // @[TLSerdes.scala:40:16] input io_ser_3_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_3_in_bits_flit, // @[TLSerdes.scala:40:16] output io_ser_4_in_ready, // @[TLSerdes.scala:40:16] input io_ser_4_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_4_in_bits_flit, // @[TLSerdes.scala:40:16] input io_ser_4_out_ready, // @[TLSerdes.scala:40:16] output io_ser_4_out_valid, // @[TLSerdes.scala:40:16] output [31:0] io_ser_4_out_bits_flit // @[TLSerdes.scala:40:16] ); wire _des_4_io_out_valid; // @[TLSerdes.scala:86:23] wire [85:0] _des_4_io_out_bits_payload; // @[TLSerdes.scala:86:23] wire _des_4_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_4_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _des_4_io_busy; // @[TLSerdes.scala:86:23] wire _des_3_io_out_valid; // @[TLSerdes.scala:86:23] wire [84:0] _des_3_io_out_bits_payload; // @[TLSerdes.scala:86:23] wire _des_3_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_3_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _des_3_io_busy; // @[TLSerdes.scala:86:23] wire _des_2_io_out_valid; // @[TLSerdes.scala:86:23] wire [85:0] _des_2_io_out_bits_payload; // 
@[TLSerdes.scala:86:23] wire _des_2_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_2_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _des_2_io_busy; // @[TLSerdes.scala:86:23] wire _des_1_io_out_valid; // @[TLSerdes.scala:86:23] wire [64:0] _des_1_io_out_bits_payload; // @[TLSerdes.scala:86:23] wire _des_1_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_1_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _des_0_io_out_valid; // @[TLSerdes.scala:86:23] wire [7:0] _des_0_io_out_bits_payload; // @[TLSerdes.scala:86:23] wire _des_0_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_0_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _in_channels_4_2_io_beat_ready; // @[TLSerdes.scala:82:28] wire [7:0] _in_channels_3_2_io_protocol_bits_size; // @[TLSerdes.scala:81:28] wire [7:0] _in_channels_3_2_io_protocol_bits_source; // @[TLSerdes.scala:81:28] wire [63:0] _in_channels_3_2_io_protocol_bits_address; // @[TLSerdes.scala:81:28] wire _in_channels_3_2_io_beat_ready; // @[TLSerdes.scala:81:28] wire _in_channels_2_2_io_beat_ready; // @[TLSerdes.scala:80:28] wire [7:0] _in_channels_1_2_io_protocol_bits_size; // @[TLSerdes.scala:79:28] wire [7:0] _in_channels_1_2_io_protocol_bits_source; // @[TLSerdes.scala:79:28] wire [7:0] _in_channels_1_2_io_protocol_bits_sink; // @[TLSerdes.scala:79:28] wire _in_channels_1_2_io_beat_ready; // @[TLSerdes.scala:79:28] wire _in_channels_0_2_io_beat_ready; // @[TLSerdes.scala:78:28] wire _ser_4_io_in_ready; // @[TLSerdes.scala:69:23] wire _ser_4_io_busy; // @[TLSerdes.scala:69:23] wire _ser_2_io_in_ready; // @[TLSerdes.scala:69:23] wire _ser_0_io_in_ready; // @[TLSerdes.scala:69:23] wire _out_channels_4_2_io_beat_valid; // @[TLSerdes.scala:63:50] wire [85:0] _out_channels_4_2_io_beat_bits_payload; // @[TLSerdes.scala:63:50] wire _out_channels_4_2_io_beat_bits_head; // @[TLSerdes.scala:63:50] wire _out_channels_4_2_io_beat_bits_tail; // @[TLSerdes.scala:63:50] wire _out_channels_2_2_io_beat_bits_head; // @[TLSerdes.scala:61:50] wire _out_channels_0_2_io_beat_bits_head; // @[TLSerdes.scala:59:50] wire auto_manager_in_a_valid_0 = auto_manager_in_a_valid; // @[TLSerdes.scala:39:9] wire [2:0] auto_manager_in_a_bits_opcode_0 = auto_manager_in_a_bits_opcode; // @[TLSerdes.scala:39:9] wire [2:0] auto_manager_in_a_bits_param_0 = auto_manager_in_a_bits_param; // @[TLSerdes.scala:39:9] wire [3:0] auto_manager_in_a_bits_size_0 = auto_manager_in_a_bits_size; // @[TLSerdes.scala:39:9] wire auto_manager_in_a_bits_source_0 = auto_manager_in_a_bits_source; // @[TLSerdes.scala:39:9] wire [31:0] auto_manager_in_a_bits_address_0 = auto_manager_in_a_bits_address; // @[TLSerdes.scala:39:9] wire [7:0] auto_manager_in_a_bits_mask_0 = auto_manager_in_a_bits_mask; // @[TLSerdes.scala:39:9] wire [63:0] auto_manager_in_a_bits_data_0 = auto_manager_in_a_bits_data; // @[TLSerdes.scala:39:9] wire auto_manager_in_a_bits_corrupt_0 = auto_manager_in_a_bits_corrupt; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_ready_0 = auto_manager_in_d_ready; // @[TLSerdes.scala:39:9] wire io_ser_0_in_valid_0 = io_ser_0_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_0_in_bits_flit_0 = io_ser_0_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_0_out_ready_0 = io_ser_0_out_ready; // @[TLSerdes.scala:39:9] wire io_ser_1_in_valid_0 = io_ser_1_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_1_in_bits_flit_0 = io_ser_1_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_2_in_valid_0 = io_ser_2_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_2_in_bits_flit_0 = 
io_ser_2_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_2_out_ready_0 = io_ser_2_out_ready; // @[TLSerdes.scala:39:9] wire io_ser_3_in_valid_0 = io_ser_3_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_3_in_bits_flit_0 = io_ser_3_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_4_in_valid_0 = io_ser_4_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_4_in_bits_flit_0 = io_ser_4_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_4_out_ready_0 = io_ser_4_out_ready; // @[TLSerdes.scala:39:9] wire [2:0] client_tl_b_bits_opcode = 3'h0; // @[TLSerdes.scala:45:71] wire [2:0] client_tl_d_bits_opcode = 3'h0; // @[TLSerdes.scala:45:71] wire [2:0] _out_channels_WIRE_bits_sink = 3'h0; // @[Bundles.scala:267:74] wire [2:0] out_channels_0_1_bits_sink = 3'h0; // @[Bundles.scala:267:61] wire [2:0] _out_channels_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _out_channels_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] out_channels_2_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] out_channels_2_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _in_channels_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [1:0] client_tl_b_bits_param = 2'h0; // @[TLSerdes.scala:45:71] wire [1:0] client_tl_d_bits_param = 2'h0; // @[TLSerdes.scala:45:71] wire [1:0] _in_channels_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [3:0] _out_channels_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] out_channels_2_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _in_channels_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [7:0] client_tl_b_bits_size = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_b_bits_source = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_b_bits_mask = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_d_bits_size = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_d_bits_source = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_d_bits_sink = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] _in_channels_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [63:0] client_tl_b_bits_address = 64'h0; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_b_bits_data = 64'h0; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_d_bits_data = 64'h0; // @[TLSerdes.scala:45:71] wire [63:0] _out_channels_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] out_channels_2_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _in_channels_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [31:0] io_ser_1_out_bits_flit = 32'h0; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_3_out_bits_flit = 32'h0; // @[TLSerdes.scala:39:9] wire [31:0] _out_channels_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] out_channels_2_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _in_channels_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74] wire io_ser_1_out_ready = 1'h1; // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50] wire io_ser_3_out_ready = 1'h1; // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50] wire out_channels_0_1_ready = 1'h1; // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50] wire out_channels_2_1_ready = 1'h1; // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50] wire io_ser_0_out_valid = 1'h0; // @[TLSerdes.scala:39:9] wire io_ser_1_out_valid = 1'h0; // @[TLSerdes.scala:39:9] wire io_ser_3_out_valid = 1'h0; // @[TLSerdes.scala:39:9] wire managerNodeIn_a_ready; // @[MixedNode.scala:551:17] wire client_tl_a_ready = 1'h0; // @[TLSerdes.scala:45:71] 
wire client_tl_b_ready = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_b_valid = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_b_bits_corrupt = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_c_ready = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_d_ready = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_d_valid = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_d_bits_denied = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_d_bits_corrupt = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_e_ready = 1'h0; // @[TLSerdes.scala:45:71] wire _out_channels_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _out_channels_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire out_channels_0_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _out_channels_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:74] wire _out_channels_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:74] wire _out_channels_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _out_channels_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire out_channels_2_1_valid = 1'h0; // @[Bundles.scala:265:61] wire out_channels_2_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire out_channels_2_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _in_channels_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _in_channels_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _in_channels_WIRE_bits_source = 1'h0; // @[Bundles.scala:264:74] wire _in_channels_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire in_channels_3_1_ready = 1'h0; // @[Bundles.scala:264:61] wire managerNodeIn_a_valid = auto_manager_in_a_valid_0; // @[TLSerdes.scala:39:9] wire [2:0] managerNodeIn_a_bits_opcode = auto_manager_in_a_bits_opcode_0; // @[TLSerdes.scala:39:9] wire [2:0] managerNodeIn_a_bits_param = auto_manager_in_a_bits_param_0; // @[TLSerdes.scala:39:9] wire [3:0] managerNodeIn_a_bits_size = auto_manager_in_a_bits_size_0; // @[TLSerdes.scala:39:9] wire managerNodeIn_a_bits_source = auto_manager_in_a_bits_source_0; // @[TLSerdes.scala:39:9] wire [31:0] managerNodeIn_a_bits_address = auto_manager_in_a_bits_address_0; // @[TLSerdes.scala:39:9] wire [7:0] managerNodeIn_a_bits_mask = auto_manager_in_a_bits_mask_0; // @[TLSerdes.scala:39:9] wire [63:0] managerNodeIn_a_bits_data = auto_manager_in_a_bits_data_0; // @[TLSerdes.scala:39:9] wire managerNodeIn_a_bits_corrupt = auto_manager_in_a_bits_corrupt_0; // @[TLSerdes.scala:39:9] wire managerNodeIn_d_ready = auto_manager_in_d_ready_0; // @[TLSerdes.scala:39:9] wire managerNodeIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] managerNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] managerNodeIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] managerNodeIn_d_bits_size; // @[MixedNode.scala:551:17] wire managerNodeIn_d_bits_source; // @[MixedNode.scala:551:17] wire [2:0] managerNodeIn_d_bits_sink; // @[MixedNode.scala:551:17] wire managerNodeIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] managerNodeIn_d_bits_data; // @[MixedNode.scala:551:17] wire managerNodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire _io_debug_ser_busy_T_1; // @[package.scala:81:59] wire _io_debug_des_busy_T_3; // @[package.scala:81:59] wire auto_manager_in_a_ready_0; // @[TLSerdes.scala:39:9] wire [2:0] auto_manager_in_d_bits_opcode_0; // @[TLSerdes.scala:39:9] wire [1:0] auto_manager_in_d_bits_param_0; // @[TLSerdes.scala:39:9] wire [3:0] auto_manager_in_d_bits_size_0; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_bits_source_0; // @[TLSerdes.scala:39:9] wire [2:0] 
auto_manager_in_d_bits_sink_0; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_bits_denied_0; // @[TLSerdes.scala:39:9] wire [63:0] auto_manager_in_d_bits_data_0; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_bits_corrupt_0; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_valid_0; // @[TLSerdes.scala:39:9] wire io_ser_0_in_ready_0; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_0_out_bits_flit_0; // @[TLSerdes.scala:39:9] wire io_ser_1_in_ready_0; // @[TLSerdes.scala:39:9] wire io_ser_2_in_ready_0; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_2_out_bits_flit_0; // @[TLSerdes.scala:39:9] wire io_ser_2_out_valid_0; // @[TLSerdes.scala:39:9] wire io_ser_3_in_ready_0; // @[TLSerdes.scala:39:9] wire io_ser_4_in_ready_0; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_4_out_bits_flit_0; // @[TLSerdes.scala:39:9] wire io_ser_4_out_valid_0; // @[TLSerdes.scala:39:9] wire io_debug_ser_busy; // @[TLSerdes.scala:39:9] wire io_debug_des_busy; // @[TLSerdes.scala:39:9] assign auto_manager_in_a_ready_0 = managerNodeIn_a_ready; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_valid_0 = managerNodeIn_d_valid; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_opcode_0 = managerNodeIn_d_bits_opcode; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_param_0 = managerNodeIn_d_bits_param; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_size_0 = managerNodeIn_d_bits_size; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_source_0 = managerNodeIn_d_bits_source; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_sink_0 = managerNodeIn_d_bits_sink; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_denied_0 = managerNodeIn_d_bits_denied; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_data_0 = managerNodeIn_d_bits_data; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_corrupt_0 = managerNodeIn_d_bits_corrupt; // @[TLSerdes.scala:39:9] wire [2:0] client_tl_a_bits_opcode; // @[TLSerdes.scala:45:71] wire [2:0] client_tl_a_bits_param; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_a_bits_size; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_a_bits_source; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_a_bits_address; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_a_bits_mask; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_a_bits_data; // @[TLSerdes.scala:45:71] wire client_tl_a_bits_corrupt; // @[TLSerdes.scala:45:71] wire client_tl_a_valid; // @[TLSerdes.scala:45:71] wire [2:0] client_tl_c_bits_opcode; // @[TLSerdes.scala:45:71] wire [2:0] client_tl_c_bits_param; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_c_bits_size; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_c_bits_source; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_c_bits_address; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_c_bits_data; // @[TLSerdes.scala:45:71] wire client_tl_c_bits_corrupt; // @[TLSerdes.scala:45:71] wire client_tl_c_valid; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_e_bits_sink; // @[TLSerdes.scala:45:71] wire client_tl_e_valid; // @[TLSerdes.scala:45:71] wire _io_debug_ser_busy_T; // @[package.scala:81:59] assign _io_debug_ser_busy_T_1 = _io_debug_ser_busy_T | _ser_4_io_busy; // @[TLSerdes.scala:69:23] assign io_debug_ser_busy = _io_debug_ser_busy_T_1; // @[TLSerdes.scala:39:9] wire [2:0] in_channels_3_1_bits_opcode; // @[Bundles.scala:264:61] wire [1:0] in_channels_3_1_bits_param; // @[Bundles.scala:264:61] wire [3:0] in_channels_3_1_bits_size; // @[Bundles.scala:264:61] wire in_channels_3_1_bits_source; // @[Bundles.scala:264:61] wire [31:0] 
in_channels_3_1_bits_address; // @[Bundles.scala:264:61] wire [7:0] in_channels_3_1_bits_mask; // @[Bundles.scala:264:61] wire [63:0] in_channels_3_1_bits_data; // @[Bundles.scala:264:61] wire in_channels_3_1_bits_corrupt; // @[Bundles.scala:264:61] wire in_channels_3_1_valid; // @[Bundles.scala:264:61] assign managerNodeIn_d_bits_size = _in_channels_1_2_io_protocol_bits_size[3:0]; // @[TLSerdes.scala:79:28, :85:9] assign managerNodeIn_d_bits_source = _in_channels_1_2_io_protocol_bits_source[0]; // @[TLSerdes.scala:79:28, :85:9] assign managerNodeIn_d_bits_sink = _in_channels_1_2_io_protocol_bits_sink[2:0]; // @[TLSerdes.scala:79:28, :85:9] assign in_channels_3_1_bits_size = _in_channels_3_2_io_protocol_bits_size[3:0]; // @[TLSerdes.scala:81:28, :85:9] assign in_channels_3_1_bits_source = _in_channels_3_2_io_protocol_bits_source[0]; // @[TLSerdes.scala:81:28, :85:9] assign in_channels_3_1_bits_address = _in_channels_3_2_io_protocol_bits_address[31:0]; // @[TLSerdes.scala:81:28, :85:9] wire _io_debug_des_busy_T; // @[package.scala:81:59] wire _io_debug_des_busy_T_1 = _io_debug_des_busy_T | _des_2_io_busy; // @[TLSerdes.scala:86:23] wire _io_debug_des_busy_T_2 = _io_debug_des_busy_T_1 | _des_3_io_busy; // @[TLSerdes.scala:86:23] assign _io_debug_des_busy_T_3 = _io_debug_des_busy_T_2 | _des_4_io_busy; // @[TLSerdes.scala:86:23] assign io_debug_des_busy = _io_debug_des_busy_T_3; // @[TLSerdes.scala:39:9] TLMonitor_64 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (managerNodeIn_a_ready), // @[MixedNode.scala:551:17] .io_in_a_valid (managerNodeIn_a_valid), // @[MixedNode.scala:551:17] .io_in_a_bits_opcode (managerNodeIn_a_bits_opcode), // @[MixedNode.scala:551:17] .io_in_a_bits_param (managerNodeIn_a_bits_param), // @[MixedNode.scala:551:17] .io_in_a_bits_size (managerNodeIn_a_bits_size), // @[MixedNode.scala:551:17] .io_in_a_bits_source (managerNodeIn_a_bits_source), // @[MixedNode.scala:551:17] .io_in_a_bits_address (managerNodeIn_a_bits_address), // @[MixedNode.scala:551:17] .io_in_a_bits_mask (managerNodeIn_a_bits_mask), // @[MixedNode.scala:551:17] .io_in_a_bits_data (managerNodeIn_a_bits_data), // @[MixedNode.scala:551:17] .io_in_a_bits_corrupt (managerNodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17] .io_in_d_ready (managerNodeIn_d_ready), // @[MixedNode.scala:551:17] .io_in_d_valid (managerNodeIn_d_valid), // @[MixedNode.scala:551:17] .io_in_d_bits_opcode (managerNodeIn_d_bits_opcode), // @[MixedNode.scala:551:17] .io_in_d_bits_param (managerNodeIn_d_bits_param), // @[MixedNode.scala:551:17] .io_in_d_bits_size (managerNodeIn_d_bits_size), // @[MixedNode.scala:551:17] .io_in_d_bits_source (managerNodeIn_d_bits_source), // @[MixedNode.scala:551:17] .io_in_d_bits_sink (managerNodeIn_d_bits_sink), // @[MixedNode.scala:551:17] .io_in_d_bits_denied (managerNodeIn_d_bits_denied), // @[MixedNode.scala:551:17] .io_in_d_bits_data (managerNodeIn_d_bits_data), // @[MixedNode.scala:551:17] .io_in_d_bits_corrupt (managerNodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17] ); // @[Nodes.scala:27:25] TLEToBeat_SerialRAM_a64d64s8k8z8c out_channels_0_2 ( // @[TLSerdes.scala:59:50] .clock (clock), .reset (reset), .io_beat_ready (_ser_0_io_in_ready), // @[TLSerdes.scala:69:23] .io_beat_bits_head (_out_channels_0_2_io_beat_bits_head) ); // @[TLSerdes.scala:59:50] TLCToBeat_SerialRAM_a64d64s8k8z8c out_channels_2_2 ( // @[TLSerdes.scala:61:50] .clock (clock), .reset (reset), .io_beat_ready (_ser_2_io_in_ready), // @[TLSerdes.scala:69:23] .io_beat_bits_head 
(_out_channels_2_2_io_beat_bits_head) ); // @[TLSerdes.scala:61:50] TLAToBeat_SerialRAM_a64d64s8k8z8c out_channels_4_2 ( // @[TLSerdes.scala:63:50] .clock (clock), .reset (reset), .io_protocol_ready (managerNodeIn_a_ready), .io_protocol_valid (managerNodeIn_a_valid), // @[MixedNode.scala:551:17] .io_protocol_bits_opcode (managerNodeIn_a_bits_opcode), // @[MixedNode.scala:551:17] .io_protocol_bits_param (managerNodeIn_a_bits_param), // @[MixedNode.scala:551:17] .io_protocol_bits_size ({4'h0, managerNodeIn_a_bits_size}), // @[TLSerdes.scala:68:21] .io_protocol_bits_source ({7'h0, managerNodeIn_a_bits_source}), // @[TLSerdes.scala:68:21] .io_protocol_bits_address ({32'h0, managerNodeIn_a_bits_address}), // @[TLSerdes.scala:68:21] .io_protocol_bits_mask (managerNodeIn_a_bits_mask), // @[MixedNode.scala:551:17] .io_protocol_bits_data (managerNodeIn_a_bits_data), // @[MixedNode.scala:551:17] .io_protocol_bits_corrupt (managerNodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17] .io_beat_ready (_ser_4_io_in_ready), // @[TLSerdes.scala:69:23] .io_beat_valid (_out_channels_4_2_io_beat_valid), .io_beat_bits_payload (_out_channels_4_2_io_beat_bits_payload), .io_beat_bits_head (_out_channels_4_2_io_beat_bits_head), .io_beat_bits_tail (_out_channels_4_2_io_beat_bits_tail) ); // @[TLSerdes.scala:63:50] GenericSerializer_TLBeatw10_f32 ser_0 ( // @[TLSerdes.scala:69:23] .clock (clock), .reset (reset), .io_in_ready (_ser_0_io_in_ready), .io_in_bits_head (_out_channels_0_2_io_beat_bits_head), // @[TLSerdes.scala:59:50] .io_out_ready (io_ser_0_out_ready_0), // @[TLSerdes.scala:39:9] .io_out_bits_flit (io_ser_0_out_bits_flit_0) ); // @[TLSerdes.scala:69:23] GenericSerializer_TLBeatw88_f32 ser_2 ( // @[TLSerdes.scala:69:23] .clock (clock), .reset (reset), .io_in_ready (_ser_2_io_in_ready), .io_in_bits_head (_out_channels_2_2_io_beat_bits_head), // @[TLSerdes.scala:61:50] .io_out_ready (io_ser_2_out_ready_0), // @[TLSerdes.scala:39:9] .io_out_valid (io_ser_2_out_valid_0), .io_out_bits_flit (io_ser_2_out_bits_flit_0), .io_busy (_io_debug_ser_busy_T) ); // @[TLSerdes.scala:69:23] GenericSerializer_TLBeatw88_f32_1 ser_4 ( // @[TLSerdes.scala:69:23] .clock (clock), .reset (reset), .io_in_ready (_ser_4_io_in_ready), .io_in_valid (_out_channels_4_2_io_beat_valid), // @[TLSerdes.scala:63:50] .io_in_bits_payload (_out_channels_4_2_io_beat_bits_payload), // @[TLSerdes.scala:63:50] .io_in_bits_head (_out_channels_4_2_io_beat_bits_head), // @[TLSerdes.scala:63:50] .io_in_bits_tail (_out_channels_4_2_io_beat_bits_tail), // @[TLSerdes.scala:63:50] .io_out_ready (io_ser_4_out_ready_0), // @[TLSerdes.scala:39:9] .io_out_valid (io_ser_4_out_valid_0), .io_out_bits_flit (io_ser_4_out_bits_flit_0), .io_busy (_ser_4_io_busy) ); // @[TLSerdes.scala:69:23] TLEFromBeat_SerialRAM_a64d64s8k8z8c in_channels_0_2 ( // @[TLSerdes.scala:78:28] .clock (clock), .reset (reset), .io_protocol_valid (client_tl_e_valid), .io_protocol_bits_sink (client_tl_e_bits_sink), .io_beat_ready (_in_channels_0_2_io_beat_ready), .io_beat_valid (_des_0_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_0_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_0_io_out_bits_head), // @[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_0_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:78:28] TLDFromBeat_SerialRAM_a64d64s8k8z8c in_channels_1_2 ( // @[TLSerdes.scala:79:28] .clock (clock), .reset (reset), .io_protocol_ready (managerNodeIn_d_ready), // @[MixedNode.scala:551:17] .io_protocol_valid 
(managerNodeIn_d_valid), .io_protocol_bits_opcode (managerNodeIn_d_bits_opcode), .io_protocol_bits_param (managerNodeIn_d_bits_param), .io_protocol_bits_size (_in_channels_1_2_io_protocol_bits_size), .io_protocol_bits_source (_in_channels_1_2_io_protocol_bits_source), .io_protocol_bits_sink (_in_channels_1_2_io_protocol_bits_sink), .io_protocol_bits_denied (managerNodeIn_d_bits_denied), .io_protocol_bits_data (managerNodeIn_d_bits_data), .io_protocol_bits_corrupt (managerNodeIn_d_bits_corrupt), .io_beat_ready (_in_channels_1_2_io_beat_ready), .io_beat_valid (_des_1_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_1_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_1_io_out_bits_head), // @[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_1_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:79:28] TLCFromBeat_SerialRAM_a64d64s8k8z8c in_channels_2_2 ( // @[TLSerdes.scala:80:28] .clock (clock), .reset (reset), .io_protocol_valid (client_tl_c_valid), .io_protocol_bits_opcode (client_tl_c_bits_opcode), .io_protocol_bits_param (client_tl_c_bits_param), .io_protocol_bits_size (client_tl_c_bits_size), .io_protocol_bits_source (client_tl_c_bits_source), .io_protocol_bits_address (client_tl_c_bits_address), .io_protocol_bits_data (client_tl_c_bits_data), .io_protocol_bits_corrupt (client_tl_c_bits_corrupt), .io_beat_ready (_in_channels_2_2_io_beat_ready), .io_beat_valid (_des_2_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_2_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_2_io_out_bits_head), // @[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_2_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:80:28] TLBFromBeat_SerialRAM_a64d64s8k8z8c in_channels_3_2 ( // @[TLSerdes.scala:81:28] .clock (clock), .reset (reset), .io_protocol_valid (in_channels_3_1_valid), .io_protocol_bits_opcode (in_channels_3_1_bits_opcode), .io_protocol_bits_param (in_channels_3_1_bits_param), .io_protocol_bits_size (_in_channels_3_2_io_protocol_bits_size), .io_protocol_bits_source (_in_channels_3_2_io_protocol_bits_source), .io_protocol_bits_address (_in_channels_3_2_io_protocol_bits_address), .io_protocol_bits_mask (in_channels_3_1_bits_mask), .io_protocol_bits_data (in_channels_3_1_bits_data), .io_protocol_bits_corrupt (in_channels_3_1_bits_corrupt), .io_beat_ready (_in_channels_3_2_io_beat_ready), .io_beat_valid (_des_3_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_3_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_3_io_out_bits_head), // @[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_3_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:81:28] TLAFromBeat_SerialRAM_a64d64s8k8z8c in_channels_4_2 ( // @[TLSerdes.scala:82:28] .clock (clock), .reset (reset), .io_protocol_valid (client_tl_a_valid), .io_protocol_bits_opcode (client_tl_a_bits_opcode), .io_protocol_bits_param (client_tl_a_bits_param), .io_protocol_bits_size (client_tl_a_bits_size), .io_protocol_bits_source (client_tl_a_bits_source), .io_protocol_bits_address (client_tl_a_bits_address), .io_protocol_bits_mask (client_tl_a_bits_mask), .io_protocol_bits_data (client_tl_a_bits_data), .io_protocol_bits_corrupt (client_tl_a_bits_corrupt), .io_beat_ready (_in_channels_4_2_io_beat_ready), .io_beat_valid (_des_4_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_4_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_4_io_out_bits_head), // 
@[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_4_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:82:28] GenericDeserializer_TLBeatw10_f32_1 des_0 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_0_in_ready_0), .io_in_valid (io_ser_0_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_0_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_0_2_io_beat_ready), // @[TLSerdes.scala:78:28] .io_out_valid (_des_0_io_out_valid), .io_out_bits_payload (_des_0_io_out_bits_payload), .io_out_bits_head (_des_0_io_out_bits_head), .io_out_bits_tail (_des_0_io_out_bits_tail) ); // @[TLSerdes.scala:86:23] GenericDeserializer_TLBeatw67_f32_1 des_1 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_1_in_ready_0), .io_in_valid (io_ser_1_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_1_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_1_2_io_beat_ready), // @[TLSerdes.scala:79:28] .io_out_valid (_des_1_io_out_valid), .io_out_bits_payload (_des_1_io_out_bits_payload), .io_out_bits_head (_des_1_io_out_bits_head), .io_out_bits_tail (_des_1_io_out_bits_tail), .io_busy (_io_debug_des_busy_T) ); // @[TLSerdes.scala:86:23] GenericDeserializer_TLBeatw88_f32_2 des_2 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_2_in_ready_0), .io_in_valid (io_ser_2_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_2_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_2_2_io_beat_ready), // @[TLSerdes.scala:80:28] .io_out_valid (_des_2_io_out_valid), .io_out_bits_payload (_des_2_io_out_bits_payload), .io_out_bits_head (_des_2_io_out_bits_head), .io_out_bits_tail (_des_2_io_out_bits_tail), .io_busy (_des_2_io_busy) ); // @[TLSerdes.scala:86:23] GenericDeserializer_TLBeatw87_f32_1 des_3 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_3_in_ready_0), .io_in_valid (io_ser_3_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_3_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_3_2_io_beat_ready), // @[TLSerdes.scala:81:28] .io_out_valid (_des_3_io_out_valid), .io_out_bits_payload (_des_3_io_out_bits_payload), .io_out_bits_head (_des_3_io_out_bits_head), .io_out_bits_tail (_des_3_io_out_bits_tail), .io_busy (_des_3_io_busy) ); // @[TLSerdes.scala:86:23] GenericDeserializer_TLBeatw88_f32_3 des_4 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_4_in_ready_0), .io_in_valid (io_ser_4_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_4_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_4_2_io_beat_ready), // @[TLSerdes.scala:82:28] .io_out_valid (_des_4_io_out_valid), .io_out_bits_payload (_des_4_io_out_bits_payload), .io_out_bits_head (_des_4_io_out_bits_head), .io_out_bits_tail (_des_4_io_out_bits_tail), .io_busy (_des_4_io_busy) ); // @[TLSerdes.scala:86:23] assign auto_manager_in_a_ready = auto_manager_in_a_ready_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_valid = auto_manager_in_d_valid_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_opcode = auto_manager_in_d_bits_opcode_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_param = auto_manager_in_d_bits_param_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_size = auto_manager_in_d_bits_size_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_source = auto_manager_in_d_bits_source_0; // 
@[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_sink = auto_manager_in_d_bits_sink_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_denied = auto_manager_in_d_bits_denied_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_data = auto_manager_in_d_bits_data_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_corrupt = auto_manager_in_d_bits_corrupt_0; // @[TLSerdes.scala:39:9] assign io_ser_0_in_ready = io_ser_0_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_0_out_bits_flit = io_ser_0_out_bits_flit_0; // @[TLSerdes.scala:39:9] assign io_ser_1_in_ready = io_ser_1_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_2_in_ready = io_ser_2_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_2_out_valid = io_ser_2_out_valid_0; // @[TLSerdes.scala:39:9] assign io_ser_2_out_bits_flit = io_ser_2_out_bits_flit_0; // @[TLSerdes.scala:39:9] assign io_ser_3_in_ready = io_ser_3_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_4_in_ready = io_ser_4_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_4_out_valid = io_ser_4_out_valid_0; // @[TLSerdes.scala:39:9] assign io_ser_4_out_bits_flit = io_ser_4_out_bits_flit_0; // @[TLSerdes.scala:39:9] endmodule
Generate the Verilog code corresponding to the following Chisel files. File RegisterFile.scala: package saturn.backend import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.tile.{CoreModule} import freechips.rocketchip.util._ import saturn.common._ class OldestRRArbiter(val n: Int)(implicit p: Parameters) extends Module { val io = IO(new ArbiterIO(new VectorReadReq, n)) val arb = Module(new RRArbiter(new VectorReadReq, n)) io <> arb.io val oldest_oh = io.in.map(i => i.valid && i.bits.oldest) //assert(PopCount(oldest_oh) <= 1.U) when (oldest_oh.orR) { io.chosen := VecInit(oldest_oh).asUInt io.out.valid := true.B io.out.bits := Mux1H(oldest_oh, io.in.map(_.bits)) for (i <- 0 until n) { io.in(i).ready := oldest_oh(i) && io.out.ready } } } class RegisterReadXbar(n: Int, banks: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io = IO(new Bundle { val in = Vec(n, Flipped(new VectorReadIO)) val out = Vec(banks, new VectorReadIO) }) val arbs = Seq.fill(banks) { Module(new OldestRRArbiter(n)) } for (i <- 0 until banks) { io.out(i).req <> arbs(i).io.out } val bankOffset = log2Ceil(banks) for (i <- 0 until n) { val bank_sel = if (bankOffset == 0) true.B else UIntToOH(io.in(i).req.bits.eg(bankOffset-1,0)) for (j <- 0 until banks) { arbs(j).io.in(i).valid := io.in(i).req.valid && bank_sel(j) arbs(j).io.in(i).bits.eg := io.in(i).req.bits.eg >> bankOffset arbs(j).io.in(i).bits.oldest := io.in(i).req.bits.oldest } io.in(i).req.ready := Mux1H(bank_sel, arbs.map(_.io.in(i).ready)) io.in(i).resp := Mux1H(bank_sel, io.out.map(_.resp)) } } class RegisterFileBank(reads: Int, maskReads: Int, rows: Int, maskRows: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io = IO(new Bundle { val read = Vec(reads, Flipped(new VectorReadIO)) val mask_read = Vec(maskReads, Flipped(new VectorReadIO)) val write = Input(Valid(new VectorWrite(dLen))) val ll_write = Flipped(Decoupled(new VectorWrite(dLen))) }) val ll_write_valid = RegInit(false.B) val ll_write_bits = Reg(new VectorWrite(dLen)) val vrf = Mem(rows, Vec(dLen, Bool())) val v0_mask = Mem(maskRows, Vec(dLen, Bool())) for (read <- io.read) { read.req.ready := !(ll_write_valid && read.req.bits.eg === ll_write_bits.eg) read.resp := DontCare when (read.req.valid) { read.resp := vrf.read(read.req.bits.eg).asUInt } } for (mask_read <- io.mask_read) { mask_read.req.ready := !(ll_write_valid && mask_read.req.bits.eg === ll_write_bits.eg) mask_read.resp := DontCare when (mask_read.req.valid) { mask_read.resp := v0_mask.read(mask_read.req.bits.eg).asUInt } } val write = WireInit(io.write) io.ll_write.ready := false.B if (vParams.vrfHiccupBuffer) { when (!io.write.valid) { // drain hiccup buffer write.valid := ll_write_valid || io.ll_write.valid write.bits := Mux(ll_write_valid, ll_write_bits, io.ll_write.bits) ll_write_valid := false.B when (io.ll_write.valid && ll_write_valid) { ll_write_valid := true.B ll_write_bits := io.ll_write.bits } io.ll_write.ready := true.B } .elsewhen (!ll_write_valid) { // fill hiccup buffer when (io.ll_write.valid) { ll_write_valid := true.B ll_write_bits := io.ll_write.bits } io.ll_write.ready := true.B } } else { when (!io.write.valid) { io.ll_write.ready := true.B write.valid := io.ll_write.valid write.bits := io.ll_write.bits } } when (write.valid) { vrf.write( write.bits.eg, VecInit(write.bits.data.asBools), write.bits.mask.asBools) when (write.bits.eg < maskRows.U) { v0_mask.write( write.bits.eg, VecInit(write.bits.data.asBools), 
write.bits.mask.asBools) } } } class RegisterFile(reads: Seq[Int], maskReads: Seq[Int], pipeWrites: Int, llWrites: Int)(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val nBanks = vParams.vrfBanking // Support 1, 2, and 4 banks for the VRF require(nBanks == 1 || nBanks == 2 || nBanks == 4) val io = IO(new Bundle { val read = MixedVec(reads.map(rc => Vec(rc, Flipped(new VectorReadIO)))) val mask_read = MixedVec(maskReads.map(rc => Vec(rc, Flipped(new VectorReadIO)))) val pipe_writes = Vec(pipeWrites, Input(Valid(new VectorWrite(dLen)))) val ll_writes = Vec(llWrites, Flipped(Decoupled(new VectorWrite(dLen)))) }) val vrf = Seq.fill(nBanks) { Module(new RegisterFileBank(reads.size, maskReads.size, egsTotal/nBanks, if (egsPerVReg < nBanks) 1 else egsPerVReg / nBanks)) } reads.zipWithIndex.foreach { case (rc, i) => val xbar = Module(new RegisterReadXbar(rc, nBanks)) vrf.zipWithIndex.foreach { case (bank, j) => bank.io.read(i) <> xbar.io.out(j) } xbar.io.in <> io.read(i) } maskReads.zipWithIndex.foreach { case (rc, i) => val mask_xbar = Module(new RegisterReadXbar(rc, nBanks)) vrf.zipWithIndex.foreach { case (bank, j) => bank.io.mask_read(i) <> mask_xbar.io.out(j) } mask_xbar.io.in <> io.mask_read(i) } io.ll_writes.foreach(_.ready := false.B) vrf.zipWithIndex.foreach { case (rf, i) => val bank_match = io.pipe_writes.map { w => (w.bits.bankId === i.U) && w.valid } val bank_write_data = Mux1H(bank_match, io.pipe_writes.map(_.bits.data)) val bank_write_mask = Mux1H(bank_match, io.pipe_writes.map(_.bits.mask)) val bank_write_eg = Mux1H(bank_match, io.pipe_writes.map(_.bits.eg)) val bank_write_valid = bank_match.orR rf.io.write.valid := bank_write_valid rf.io.write.bits.data := bank_write_data rf.io.write.bits.mask := bank_write_mask rf.io.write.bits.eg := bank_write_eg >> vrfBankBits when (bank_write_valid) { PopCount(bank_match) === 1.U } val ll_arb = Module(new Arbiter(new VectorWrite(dLen), llWrites)) rf.io.ll_write <> ll_arb.io.out io.ll_writes.zipWithIndex.foreach { case (w, j) => ll_arb.io.in(j).valid := w.valid && w.bits.bankId === i.U ll_arb.io.in(j).bits.eg := w.bits.eg >> vrfBankBits ll_arb.io.in(j).bits.data := w.bits.data ll_arb.io.in(j).bits.mask := w.bits.mask when (ll_arb.io.in(j).ready && w.bits.bankId === i.U) { w.ready := true.B } } } }
module vrf_16x64( // @[RegisterFile.scala:62:16] input [3:0] R0_addr, input R0_en, input R0_clk, output [63:0] R0_data, input [3:0] R1_addr, input R1_en, input R1_clk, output [63:0] R1_data, input [3:0] R2_addr, input R2_en, input R2_clk, output [63:0] R2_data, input [3:0] W0_addr, input W0_en, input W0_clk, input [63:0] W0_data, input [63:0] W0_mask ); reg [63:0] Memory[0:15]; // @[RegisterFile.scala:62:16] always @(posedge W0_clk) begin // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[0]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h0 +: 1] <= W0_data[0]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[1]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h1 +: 1] <= W0_data[1]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[2]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h2 +: 1] <= W0_data[2]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[3]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h3 +: 1] <= W0_data[3]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[4]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h4 +: 1] <= W0_data[4]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[5]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h5 +: 1] <= W0_data[5]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[6]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h6 +: 1] <= W0_data[6]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[7]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h7 +: 1] <= W0_data[7]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[8]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h8 +: 1] <= W0_data[8]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[9]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h9 +: 1] <= W0_data[9]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[10]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'hA +: 1] <= W0_data[10]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[11]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'hB +: 1] <= W0_data[11]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[12]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'hC +: 1] <= W0_data[12]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[13]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'hD +: 1] <= W0_data[13]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[14]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'hE +: 1] <= W0_data[14]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[15]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'hF +: 1] <= W0_data[15]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[16]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h10 +: 1] <= W0_data[16]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[17]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h11 +: 1] <= W0_data[17]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[18]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h12 +: 1] <= W0_data[18]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[19]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h13 +: 1] <= W0_data[19]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[20]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h14 +: 1] <= W0_data[20]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[21]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h15 +: 1] <= W0_data[21]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[22]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h16 +: 1] <= W0_data[22]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[23]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h17 
+: 1] <= W0_data[23]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[24]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h18 +: 1] <= W0_data[24]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[25]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h19 +: 1] <= W0_data[25]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[26]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h1A +: 1] <= W0_data[26]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[27]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h1B +: 1] <= W0_data[27]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[28]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h1C +: 1] <= W0_data[28]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[29]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h1D +: 1] <= W0_data[29]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[30]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h1E +: 1] <= W0_data[30]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[31]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h1F +: 1] <= W0_data[31]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[32]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h20 +: 1] <= W0_data[32]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[33]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h21 +: 1] <= W0_data[33]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[34]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h22 +: 1] <= W0_data[34]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[35]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h23 +: 1] <= W0_data[35]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[36]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h24 +: 1] <= W0_data[36]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[37]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h25 +: 1] <= W0_data[37]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[38]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h26 +: 1] <= W0_data[38]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[39]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h27 +: 1] <= W0_data[39]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[40]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h28 +: 1] <= W0_data[40]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[41]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h29 +: 1] <= W0_data[41]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[42]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h2A +: 1] <= W0_data[42]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[43]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h2B +: 1] <= W0_data[43]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[44]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h2C +: 1] <= W0_data[44]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[45]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h2D +: 1] <= W0_data[45]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[46]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h2E +: 1] <= W0_data[46]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[47]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h2F +: 1] <= W0_data[47]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[48]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h30 +: 1] <= W0_data[48]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[49]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h31 +: 1] <= W0_data[49]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[50]) // @[RegisterFile.scala:62:16] 
Memory[W0_addr][32'h32 +: 1] <= W0_data[50]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[51]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h33 +: 1] <= W0_data[51]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[52]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h34 +: 1] <= W0_data[52]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[53]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h35 +: 1] <= W0_data[53]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[54]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h36 +: 1] <= W0_data[54]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[55]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h37 +: 1] <= W0_data[55]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[56]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h38 +: 1] <= W0_data[56]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[57]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h39 +: 1] <= W0_data[57]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[58]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h3A +: 1] <= W0_data[58]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[59]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h3B +: 1] <= W0_data[59]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[60]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h3C +: 1] <= W0_data[60]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[61]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h3D +: 1] <= W0_data[61]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[62]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h3E +: 1] <= W0_data[62]; // @[RegisterFile.scala:62:16] if (W0_en & W0_mask[63]) // @[RegisterFile.scala:62:16] Memory[W0_addr][32'h3F +: 1] <= W0_data[63]; // @[RegisterFile.scala:62:16] end // always @(posedge) assign R0_data = Memory[R0_addr]; // @[RegisterFile.scala:62:16] assign R1_data = Memory[R1_addr]; // @[RegisterFile.scala:62:16] assign R2_data = Memory[R2_addr]; // @[RegisterFile.scala:62:16] endmodule
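The vrf_16x64 module above is the lowering of the masked Mem write in RegisterFileBank: because the memory is declared as a Vec of Bools, each mask bit guards one single-bit slice write. A minimal standalone sketch of that idiom (not part of the original sources; module name and widths are illustrative) is:

import chisel3._
import chisel3.util._

class MaskedMemSketch(rows: Int = 16, width: Int = 64) extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(log2Ceil(rows).W))
    val rdata = Output(UInt(width.W))
    val wen   = Input(Bool())
    val waddr = Input(UInt(log2Ceil(rows).W))
    val wdata = Input(UInt(width.W))
    val wmask = Input(UInt(width.W))
  })
  // One Bool per bit gives the write port single-bit mask granularity,
  // mirroring Mem(rows, Vec(dLen, Bool())) in RegisterFileBank.
  val mem = Mem(rows, Vec(width, Bool()))
  io.rdata := mem.read(io.raddr).asUInt
  when (io.wen) {
    mem.write(io.waddr, VecInit(io.wdata.asBools), io.wmask.asBools)
  }
}

Each masked element write lowers to an "if (W0_en & W0_mask[i])" guarded part-select assignment like the ones emitted above.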
Generate the Verilog code corresponding to the following Chisel files. File AsyncResetReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ /** This black-boxes an Async Reset * (or Set) * Register. * * Because Chisel doesn't support * parameterized black boxes, * we unfortunately have to * instantiate a number of these. * * We also have to hard-code the set/ * reset behavior. * * Do not confuse an asynchronous * reset signal with an asynchronously * reset reg. You should still * properly synchronize your reset * deassertion. * * @param d Data input * @param q Data Output * @param clk Clock Input * @param rst Reset Input * @param en Write Enable Input * */ class AsyncResetReg(resetValue: Int = 0) extends RawModule { val io = IO(new Bundle { val d = Input(Bool()) val q = Output(Bool()) val en = Input(Bool()) val clk = Input(Clock()) val rst = Input(Reset()) }) val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W))) when (io.en) { reg := io.d } io.q := reg } class SimpleRegIO(val w: Int) extends Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) } class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module { override def desiredName = s"AsyncResetRegVec_w${w}_i${init}" val io = IO(new SimpleRegIO(w)) val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W))) when (io.en) { reg := io.d } io.q := reg } object AsyncResetReg { // Create Single Registers def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = { val reg = Module(new AsyncResetReg(if (init) 1 else 0)) reg.io.d := d reg.io.clk := clk reg.io.rst := rst reg.io.en := true.B name.foreach(reg.suggestName(_)) reg.io.q } def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None) def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name)) // Create Vectors of Registers def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = { val w = updateData.getWidth max resetData.bitLength val reg = Module(new AsyncResetRegVec(w, resetData)) name.foreach(reg.suggestName(_)) reg.io.d := updateData reg.io.en := enable reg.io.q } def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData, resetData, enable, Some(name)) def apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B) def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name)) def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable) def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name)) def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B) def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name)) }
module AsyncResetRegVec_w1_i0_9( // @[AsyncResetReg.scala:56:7] input clock, // @[AsyncResetReg.scala:56:7] input reset, // @[AsyncResetReg.scala:56:7] input io_d, // @[AsyncResetReg.scala:59:14] output io_q // @[AsyncResetReg.scala:59:14] ); wire io_d_0 = io_d; // @[AsyncResetReg.scala:56:7] wire _reg_T = reset; // @[AsyncResetReg.scala:61:29] wire io_en = 1'h1; // @[AsyncResetReg.scala:56:7, :59:14] wire io_q_0; // @[AsyncResetReg.scala:56:7] reg reg_0; // @[AsyncResetReg.scala:61:50] assign io_q_0 = reg_0; // @[AsyncResetReg.scala:56:7, :61:50] always @(posedge clock or posedge _reg_T) begin // @[AsyncResetReg.scala:56:7, :61:29] if (_reg_T) // @[AsyncResetReg.scala:56:7, :61:29] reg_0 <= 1'h0; // @[AsyncResetReg.scala:61:50] else // @[AsyncResetReg.scala:56:7] reg_0 <= io_d_0; // @[AsyncResetReg.scala:56:7, :61:50] end // always @(posedge, posedge) assign io_q = io_q_0; // @[AsyncResetReg.scala:56:7] endmodule
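The dual-edge sensitivity list in the always block above comes directly from the withReset(reset.asAsyncReset) wrapper used in AsyncResetRegVec. A minimal sketch of the same idiom (illustrative names, single-bit width assumed):

import chisel3._

class AsyncResetRegSketch extends Module {
  val io = IO(new Bundle {
    val d  = Input(Bool())
    val en = Input(Bool())
    val q  = Output(Bool())
  })
  // A register created under an asynchronous reset emits
  // "always @(posedge clock or posedge reset)" in the generated Verilog.
  val r = withReset(reset.asAsyncReset)(RegInit(false.B))
  when (io.en) { r := io.d }
  io.q := r
}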
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_33( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0 // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] PE_289 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // 
@[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] endmodule
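Tile_33 above instantiates a single PE, but the general Tile wiring relies on the foldLeft chaining idiom from Tile.scala to thread each broadcast signal through a row or column of PEs. A reduced sketch of that pattern (PassThrough and ChainSketch are illustrative stand-ins, not gemmini modules):

import chisel3._

class PassThrough(w: Int) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt(w.W))
  })
  io.out := io.in
}

class ChainSketch(n: Int = 4, w: Int = 8) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt(w.W))
  })
  val stages = Seq.fill(n)(Module(new PassThrough(w)))
  // foldLeft feeds each stage's output into the next stage's input,
  // the same way in_a/out_a (and b, d, control, id, last, valid) are
  // chained across PEs in Tile.
  io.out := stages.foldLeft(io.in) { case (prev, m) =>
    m.io.in := prev
    m.io.out
  }
}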
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File consts.scala: //****************************************************************************** // Copyright (c) 2011 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Constants //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common.constants import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util.Str import freechips.rocketchip.rocket.RVCExpander /** * Mixin for issue queue types */ trait IQType { val IQT_SZ = 3 val IQT_INT = 1.U(IQT_SZ.W) val IQT_MEM = 2.U(IQT_SZ.W) val IQT_FP = 4.U(IQT_SZ.W) val IQT_MFP = 6.U(IQT_SZ.W) } /** * Mixin for scalar operation constants */ trait ScalarOpConstants { val X = BitPat("b?") val Y = BitPat("b1") val N = BitPat("b0") //************************************ // Extra Constants // Which branch predictor predicted us val BSRC_SZ = 2 val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution //************************************ // Control Signals // CFI types val CFI_SZ = 3 val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction val CFI_BR = 1.U(CFI_SZ.W) // Branch val CFI_JAL = 2.U(CFI_SZ.W) // JAL val CFI_JALR = 3.U(CFI_SZ.W) // JALR // PC Select Signal val PC_PLUS4 = 0.U(2.W) // PC + 4 val PC_BRJMP = 1.U(2.W) // brjmp_target val PC_JALR = 2.U(2.W) // jump_reg_target // Branch Type val BR_N = 0.U(4.W) // Next val BR_NE = 1.U(4.W) // Branch on NotEqual val BR_EQ = 2.U(4.W) // Branch on Equal val BR_GE = 3.U(4.W) // Branch on Greater/Equal val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned val BR_LT = 5.U(4.W) // Branch on Less Than val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned val BR_J = 7.U(4.W) // Jump val BR_JR = 8.U(4.W) // Jump Register // RS1 Operand Select Signal val OP1_RS1 = 0.U(2.W) // Register Source #1 val OP1_ZERO= 1.U(2.W) val OP1_PC = 2.U(2.W) val OP1_X = BitPat("b??") // RS2 Operand Select Signal val OP2_RS2 = 0.U(3.W) // Register Source #2 val OP2_IMM = 1.U(3.W) // immediate val OP2_ZERO= 2.U(3.W) // constant 0 val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4) val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1 val OP2_X = BitPat("b???") // Register File Write Enable Signal val REN_0 = false.B val REN_1 = true.B // Is 32b Word or 64b Doubldword? 
val SZ_DW = 1 val DW_X = true.B // Bool(xLen==64) val DW_32 = false.B val DW_64 = true.B val DW_XPR = true.B // Bool(xLen==64) // Memory Enable Signal val MEN_0 = false.B val MEN_1 = true.B val MEN_X = false.B // Immediate Extend Select val IS_I = 0.U(3.W) // I-Type (LD,ALU) val IS_S = 1.U(3.W) // S-Type (ST) val IS_B = 2.U(3.W) // SB-Type (BR) val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC) val IS_J = 4.U(3.W) // UJ-Type (J/JAL) val IS_X = BitPat("b???") // Decode Stage Control Signals val RT_FIX = 0.U(2.W) val RT_FLT = 1.U(2.W) val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc) val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.) // TODO rename RT_NAR // Micro-op opcodes // TODO change micro-op opcodes into using enum val UOPC_SZ = 7 val uopX = BitPat.dontCare(UOPC_SZ) val uopNOP = 0.U(UOPC_SZ.W) val uopLD = 1.U(UOPC_SZ.W) val uopSTA = 2.U(UOPC_SZ.W) // store address generation val uopSTD = 3.U(UOPC_SZ.W) // store data generation val uopLUI = 4.U(UOPC_SZ.W) val uopADDI = 5.U(UOPC_SZ.W) val uopANDI = 6.U(UOPC_SZ.W) val uopORI = 7.U(UOPC_SZ.W) val uopXORI = 8.U(UOPC_SZ.W) val uopSLTI = 9.U(UOPC_SZ.W) val uopSLTIU= 10.U(UOPC_SZ.W) val uopSLLI = 11.U(UOPC_SZ.W) val uopSRAI = 12.U(UOPC_SZ.W) val uopSRLI = 13.U(UOPC_SZ.W) val uopSLL = 14.U(UOPC_SZ.W) val uopADD = 15.U(UOPC_SZ.W) val uopSUB = 16.U(UOPC_SZ.W) val uopSLT = 17.U(UOPC_SZ.W) val uopSLTU = 18.U(UOPC_SZ.W) val uopAND = 19.U(UOPC_SZ.W) val uopOR = 20.U(UOPC_SZ.W) val uopXOR = 21.U(UOPC_SZ.W) val uopSRA = 22.U(UOPC_SZ.W) val uopSRL = 23.U(UOPC_SZ.W) val uopBEQ = 24.U(UOPC_SZ.W) val uopBNE = 25.U(UOPC_SZ.W) val uopBGE = 26.U(UOPC_SZ.W) val uopBGEU = 27.U(UOPC_SZ.W) val uopBLT = 28.U(UOPC_SZ.W) val uopBLTU = 29.U(UOPC_SZ.W) val uopCSRRW= 30.U(UOPC_SZ.W) val uopCSRRS= 31.U(UOPC_SZ.W) val uopCSRRC= 32.U(UOPC_SZ.W) val uopCSRRWI=33.U(UOPC_SZ.W) val uopCSRRSI=34.U(UOPC_SZ.W) val uopCSRRCI=35.U(UOPC_SZ.W) val uopJ = 36.U(UOPC_SZ.W) val uopJAL = 37.U(UOPC_SZ.W) val uopJALR = 38.U(UOPC_SZ.W) val uopAUIPC= 39.U(UOPC_SZ.W) //val uopSRET = 40.U(UOPC_SZ.W) val uopCFLSH= 41.U(UOPC_SZ.W) val uopFENCE= 42.U(UOPC_SZ.W) val uopADDIW= 43.U(UOPC_SZ.W) val uopADDW = 44.U(UOPC_SZ.W) val uopSUBW = 45.U(UOPC_SZ.W) val uopSLLIW= 46.U(UOPC_SZ.W) val uopSLLW = 47.U(UOPC_SZ.W) val uopSRAIW= 48.U(UOPC_SZ.W) val uopSRAW = 49.U(UOPC_SZ.W) val uopSRLIW= 50.U(UOPC_SZ.W) val uopSRLW = 51.U(UOPC_SZ.W) val uopMUL = 52.U(UOPC_SZ.W) val uopMULH = 53.U(UOPC_SZ.W) val uopMULHU= 54.U(UOPC_SZ.W) val uopMULHSU=55.U(UOPC_SZ.W) val uopMULW = 56.U(UOPC_SZ.W) val uopDIV = 57.U(UOPC_SZ.W) val uopDIVU = 58.U(UOPC_SZ.W) val uopREM = 59.U(UOPC_SZ.W) val uopREMU = 60.U(UOPC_SZ.W) val uopDIVW = 61.U(UOPC_SZ.W) val uopDIVUW= 62.U(UOPC_SZ.W) val uopREMW = 63.U(UOPC_SZ.W) val uopREMUW= 64.U(UOPC_SZ.W) val uopFENCEI = 65.U(UOPC_SZ.W) // = 66.U(UOPC_SZ.W) val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen) val uopFMV_W_X = 68.U(UOPC_SZ.W) val uopFMV_D_X = 69.U(UOPC_SZ.W) val uopFMV_X_W = 70.U(UOPC_SZ.W) val uopFMV_X_D = 71.U(UOPC_SZ.W) val uopFSGNJ_S = 72.U(UOPC_SZ.W) val uopFSGNJ_D = 73.U(UOPC_SZ.W) val uopFCVT_S_D = 74.U(UOPC_SZ.W) val uopFCVT_D_S = 75.U(UOPC_SZ.W) val uopFCVT_S_X = 76.U(UOPC_SZ.W) val uopFCVT_D_X = 77.U(UOPC_SZ.W) val uopFCVT_X_S = 78.U(UOPC_SZ.W) val uopFCVT_X_D = 79.U(UOPC_SZ.W) val uopCMPR_S = 80.U(UOPC_SZ.W) val uopCMPR_D = 81.U(UOPC_SZ.W) val uopFCLASS_S = 82.U(UOPC_SZ.W) val uopFCLASS_D = 83.U(UOPC_SZ.W) val uopFMINMAX_S = 84.U(UOPC_SZ.W) val uopFMINMAX_D = 85.U(UOPC_SZ.W) // = 86.U(UOPC_SZ.W) val uopFADD_S = 
87.U(UOPC_SZ.W) val uopFSUB_S = 88.U(UOPC_SZ.W) val uopFMUL_S = 89.U(UOPC_SZ.W) val uopFADD_D = 90.U(UOPC_SZ.W) val uopFSUB_D = 91.U(UOPC_SZ.W) val uopFMUL_D = 92.U(UOPC_SZ.W) val uopFMADD_S = 93.U(UOPC_SZ.W) val uopFMSUB_S = 94.U(UOPC_SZ.W) val uopFNMADD_S = 95.U(UOPC_SZ.W) val uopFNMSUB_S = 96.U(UOPC_SZ.W) val uopFMADD_D = 97.U(UOPC_SZ.W) val uopFMSUB_D = 98.U(UOPC_SZ.W) val uopFNMADD_D = 99.U(UOPC_SZ.W) val uopFNMSUB_D = 100.U(UOPC_SZ.W) val uopFDIV_S = 101.U(UOPC_SZ.W) val uopFDIV_D = 102.U(UOPC_SZ.W) val uopFSQRT_S = 103.U(UOPC_SZ.W) val uopFSQRT_D = 104.U(UOPC_SZ.W) val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET val uopSFENCE = 107.U(UOPC_SZ.W) val uopROCC = 108.U(UOPC_SZ.W) val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2" // The Bubble Instruction (Machine generated NOP) // Insert (XOR x0,x0,x0) which is different from software compiler // generated NOPs which are (ADDI x0, x0, 0). // Reasoning for this is to let visualizers and stat-trackers differentiate // between software NOPs and machine-generated Bubbles in the pipeline. val BUBBLE = (0x4033).U(32.W) def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = { val uop = Wire(new boom.v3.common.MicroOp) uop := DontCare // Overridden in the following lines uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior uop.bypassable := false.B uop.fp_val := false.B uop.uses_stq := false.B uop.uses_ldq := false.B uop.pdst := 0.U uop.dst_rtype := RT_X val cs = Wire(new boom.v3.common.CtrlSignals()) cs := DontCare // Overridden in the following lines cs.br_type := BR_N cs.csr_cmd := freechips.rocketchip.rocket.CSR.N cs.is_load := false.B cs.is_sta := false.B cs.is_std := false.B uop.ctrl := cs uop } } /** * Mixin for RISCV constants */ trait RISCVConstants { // abstract out instruction decode magic numbers val RD_MSB = 11 val RD_LSB = 7 val RS1_MSB = 19 val RS1_LSB = 15 val RS2_MSB = 24 val RS2_LSB = 20 val RS3_MSB = 31 val RS3_LSB = 27 val CSR_ADDR_MSB = 31 val CSR_ADDR_LSB = 20 val CSR_ADDR_SZ = 12 // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.) val SHAMT_5_BIT = 25 val LONGEST_IMM_SZ = 20 val X0 = 0.U val RA = 1.U // return address register // memory consistency model // The C/C++ atomics MCM requires that two loads to the same address maintain program order. // The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior). 
val MCM_ORDER_DEPENDENT_LOADS = true val jal_opc = (0x6f).U val jalr_opc = (0x67).U def GetUop(inst: UInt): UInt = inst(6,0) def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB) def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB) def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = { val rvc_exp = Module(new RVCExpander) rvc_exp.io.in := inst Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst) } // Note: Accepts only EXPANDED rvc instructions def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) ((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) ((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = { val bdecode = Module(new boom.v3.exu.BranchDecode) bdecode.io.inst := inst bdecode.io.pc := 0.U bdecode.io.out.cfi_type } } /** * Mixin for exception cause constants */ trait ExcCauseConstants { // a memory disambigious misspeculation occurred val MINI_EXCEPTION_MEM_ORDERING = 16.U val MINI_EXCEPTION_CSR_REPLAY = 17.U require (!freechips.rocketchip.rocket.Causes.all.contains(16)) require (!freechips.rocketchip.rocket.Causes.all.contains(17)) } File issue-slot.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Issue Slot Logic //-------------------------------------------------------------------------- //------------------------------------------------------------------------------ // // Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot. // TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores. // TODO Disable ldspec for FP queue. package boom.v3.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.common._ import boom.v3.util._ import FUConstants._ /** * IO bundle to interact with Issue slot * * @param numWakeupPorts number of wakeup ports for the slot */ class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle { val valid = Output(Bool()) val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitely? val request = Output(Bool()) val request_hp = Output(Bool()) val grant = Input(Bool()) val brupdate = Input(new BrUpdateInfo()) val kill = Input(Bool()) // pipeline flush val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant) val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted. 
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz)))) val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W))) val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W)))) val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry! val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collasping queue. val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued. val debug = { val result = new Bundle { val p1 = Bool() val p2 = Bool() val p3 = Bool() val ppred = Bool() val state = UInt(width=2.W) } Output(result) } } /** * Single issue slot. Holds a uop within the issue queue * * @param numWakeupPorts number of wakeup ports */ class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomModule with IssueUnitConstants { val io = IO(new IssueSlotIO(numWakeupPorts)) // slot invalid? // slot is valid, holding 1 uop // slot is valid, holds 2 uops (like a store) def is_invalid = state === s_invalid def is_valid = state =/= s_invalid val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot) val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot) val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot) val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot) val state = RegInit(s_invalid) val p1 = RegInit(false.B) val p2 = RegInit(false.B) val p3 = RegInit(false.B) val ppred = RegInit(false.B) // Poison if woken up by speculative load. // Poison lasts 1 cycle (as ldMiss will come on the next cycle). // SO if poisoned is true, set it to false! val p1_poisoned = RegInit(false.B) val p2_poisoned = RegInit(false.B) p1_poisoned := false.B p2_poisoned := false.B val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned) val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned) val slot_uop = RegInit(NullMicroOp) val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop) //----------------------------------------------------------------------------- // next slot state computation // compute the next state for THIS entry slot (in a collasping queue, the // current uop may get moved elsewhere, and a new uop can enter when (io.kill) { state := s_invalid } .elsewhen (io.in_uop.valid) { state := io.in_uop.bits.iw_state } .elsewhen (io.clear) { state := s_invalid } .otherwise { state := next_state } //----------------------------------------------------------------------------- // "update" state // compute the next state for the micro-op in this slot. This micro-op may // be moved elsewhere, so the "next_state" travels with it. // defaults next_state := state next_uopc := slot_uop.uopc next_lrs1_rtype := slot_uop.lrs1_rtype next_lrs2_rtype := slot_uop.lrs2_rtype when (io.kill) { next_state := s_invalid } .elsewhen ((io.grant && (state === s_valid_1)) || (io.grant && (state === s_valid_2) && p1 && p2 && ppred)) { // try to issue this uop. 
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) { next_state := s_invalid } } .elsewhen (io.grant && (state === s_valid_2)) { when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) { next_state := s_valid_1 when (p1) { slot_uop.uopc := uopSTD next_uopc := uopSTD slot_uop.lrs1_rtype := RT_X next_lrs1_rtype := RT_X } .otherwise { slot_uop.lrs2_rtype := RT_X next_lrs2_rtype := RT_X } } } when (io.in_uop.valid) { slot_uop := io.in_uop.bits assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.") } // Wakeup Compare Logic // these signals are the "next_p*" for the current slot's micro-op. // they are important for shifting the current slot_uop up to an other entry. val next_p1 = WireInit(p1) val next_p2 = WireInit(p2) val next_p3 = WireInit(p3) val next_ppred = WireInit(ppred) when (io.in_uop.valid) { p1 := !(io.in_uop.bits.prs1_busy) p2 := !(io.in_uop.bits.prs2_busy) p3 := !(io.in_uop.bits.prs3_busy) ppred := !(io.in_uop.bits.ppred_busy) } when (io.ldspec_miss && next_p1_poisoned) { assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!") p1 := false.B } when (io.ldspec_miss && next_p2_poisoned) { assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!") p2 := false.B } for (i <- 0 until numWakeupPorts) { when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs1)) { p1 := true.B } when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs2)) { p2 := true.B } when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs3)) { p3 := true.B } } when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) { ppred := true.B } for (w <- 0 until memWidth) { assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U), "Loads to x0 should never speculatively wakeup other instructions") } // TODO disable if FP IQ. for (w <- 0 until memWidth) { when (io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === next_uop.prs1 && next_uop.lrs1_rtype === RT_FIX) { p1 := true.B p1_poisoned := true.B assert (!next_p1_poisoned) } when (io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === next_uop.prs2 && next_uop.lrs2_rtype === RT_FIX) { p2 := true.B p2_poisoned := true.B assert (!next_p2_poisoned) } } // Handle branch misspeculations val next_br_mask = GetNewBrMask(io.brupdate, slot_uop) // was this micro-op killed by a branch? if yes, we can't let it be valid if // we compact it into an other entry when (IsKilledByBranch(io.brupdate, slot_uop)) { next_state := s_invalid } when (!io.in_uop.valid) { slot_uop.br_mask := next_br_mask } //------------------------------------------------------------- // Request Logic io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr io.request_hp := io.request && high_priority when (state === s_valid_1) { io.request := p1 && p2 && p3 && ppred && !io.kill } .elsewhen (state === s_valid_2) { io.request := (p1 || p2) && ppred && !io.kill } .otherwise { io.request := false.B } //assign outputs io.valid := is_valid io.uop := slot_uop io.uop.iw_p1_poisoned := p1_poisoned io.uop.iw_p2_poisoned := p2_poisoned // micro-op will vacate due to grant. 
val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred) val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned) io.will_be_valid := is_valid && !(may_vacate && !squash_grant) io.out_uop := slot_uop io.out_uop.iw_state := next_state io.out_uop.uopc := next_uopc io.out_uop.lrs1_rtype := next_lrs1_rtype io.out_uop.lrs2_rtype := next_lrs2_rtype io.out_uop.br_mask := next_br_mask io.out_uop.prs1_busy := !p1 io.out_uop.prs2_busy := !p2 io.out_uop.prs3_busy := !p3 io.out_uop.ppred_busy := !ppred io.out_uop.iw_p1_poisoned := p1_poisoned io.out_uop.iw_p2_poisoned := p2_poisoned when (state === s_valid_2) { when (p1 && p2 && ppred) { ; // send out the entire instruction as one uop } .elsewhen (p1 && ppred) { io.uop.uopc := slot_uop.uopc io.uop.lrs2_rtype := RT_X } .elsewhen (p2 && ppred) { io.uop.uopc := uopSTD io.uop.lrs1_rtype := RT_X } } // debug outputs io.debug.p1 := p1 io.debug.p2 := p2 io.debug.p3 := p3 io.debug.ppred := ppred io.debug.state := state }
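A minimal, self-contained Chisel sketch of the slot's issue-request logic follows; it is not part of the BOOM sources above, and the module name SlotRequestSketch and its flat port names are made up for illustration. It distills the behavior coded in the request section: an s_valid_1 entry requests issue only when all three source operands and the predicate are ready, an s_valid_2 entry (a store broken into two uops) may request as soon as either half's operand is ready, and io.kill suppresses the request in both cases.

import chisel3._
import chisel3.util._

// Standalone simplification of the IssueSlot request logic above.
class SlotRequestSketch extends Module {
  val io = IO(new Bundle {
    val state   = Input(UInt(2.W)) // slot state, encoded as in IssueUnitConstants
    val p1      = Input(Bool())    // operand 1 ready
    val p2      = Input(Bool())    // operand 2 ready
    val p3      = Input(Bool())    // operand 3 ready
    val ppred   = Input(Bool())    // predicate ready
    val kill    = Input(Bool())    // pipeline flush
    val request = Output(Bool())
  })

  val s_invalid :: s_valid_1 :: s_valid_2 :: Nil = Enum(3)

  io.request := false.B
  when (io.state === s_valid_1) {
    // single-uop entry: needs every operand and the predicate
    io.request := io.p1 && io.p2 && io.p3 && io.ppred && !io.kill
  } .elsewhen (io.state === s_valid_2) {
    // two-uop entry (e.g. a store's address/data halves):
    // either half may issue once its operand is ready
    io.request := (io.p1 || io.p2) && io.ppred && !io.kill
  }
}

In the full slot this request is further qualified by the grant handshake and the squash_grant term shown above, which cancels a grant when a speculative load wakeup is found to have missed (io.ldspec_miss with a poisoned operand).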
module IssueSlot_61( // @[issue-slot.scala:69:7] input clock, // @[issue-slot.scala:69:7] input reset, // @[issue-slot.scala:69:7] output io_valid, // @[issue-slot.scala:73:14] output io_will_be_valid, // @[issue-slot.scala:73:14] output io_request, // @[issue-slot.scala:73:14] output io_request_hp, // @[issue-slot.scala:73:14] input io_grant, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_uopc, // @[issue-slot.scala:73:14] input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:73:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:73:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[issue-slot.scala:73:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_load, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_std, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_br, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_jalr, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_jal, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_taken, // @[issue-slot.scala:73:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:73:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ppred_busy, // 
@[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_exception, // @[issue-slot.scala:73:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bypassable, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ldst_val, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_fp_single, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:73:14] input io_brupdate_b2_valid, // @[issue-slot.scala:73:14] input io_brupdate_b2_mispredict, // @[issue-slot.scala:73:14] input io_brupdate_b2_taken, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:73:14] input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:73:14] input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:73:14] input io_kill, // @[issue-slot.scala:73:14] input io_clear, // @[issue-slot.scala:73:14] input io_ldspec_miss, // @[issue-slot.scala:73:14] input io_wakeup_ports_0_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_0_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_0_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_1_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_1_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_1_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_2_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_2_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_2_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_3_valid, // @[issue-slot.scala:73:14] input 
[6:0] io_wakeup_ports_3_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_3_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_4_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_4_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_4_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_5_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_5_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_5_bits_poisoned, // @[issue-slot.scala:73:14] input io_wakeup_ports_6_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_6_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_6_bits_poisoned, // @[issue-slot.scala:73:14] input io_spec_ld_wakeup_0_valid, // @[issue-slot.scala:73:14] input [6:0] io_spec_ld_wakeup_0_bits, // @[issue-slot.scala:73:14] input io_in_uop_valid, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_uopc, // @[issue-slot.scala:73:14] input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:73:14] input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_rvc, // @[issue-slot.scala:73:14] input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_iq_type, // @[issue-slot.scala:73:14] input [9:0] io_in_uop_bits_fu_code, // @[issue-slot.scala:73:14] input [3:0] io_in_uop_bits_ctrl_br_type, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_ctrl_op1_sel, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_op2_sel, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_imm_sel, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ctrl_op_fcn, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_fcn_dw, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_csr_cmd, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_load, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_sta, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_std, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_iw_state, // @[issue-slot.scala:73:14] input io_in_uop_bits_iw_p1_poisoned, // @[issue-slot.scala:73:14] input io_in_uop_bits_iw_p2_poisoned, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_br, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_jalr, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_jal, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_sfb, // @[issue-slot.scala:73:14] input [15:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:73:14] input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:73:14] input io_in_uop_bits_edge_inst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:73:14] input io_in_uop_bits_taken, // @[issue-slot.scala:73:14] input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:73:14] input [11:0] io_in_uop_bits_csr_addr, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:73:14] input 
io_in_uop_bits_prs1_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:73:14] input io_in_uop_bits_exception, // @[issue-slot.scala:73:14] input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:73:14] input io_in_uop_bits_bypassable, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:73:14] input io_in_uop_bits_mem_signed, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_fence, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_fencei, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_amo, // @[issue-slot.scala:73:14] input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:73:14] input io_in_uop_bits_uses_stq, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_unique, // @[issue-slot.scala:73:14] input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:73:14] input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:73:14] input io_in_uop_bits_ldst_val, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:73:14] input io_in_uop_bits_frs3_en, // @[issue-slot.scala:73:14] input io_in_uop_bits_fp_val, // @[issue-slot.scala:73:14] input io_in_uop_bits_fp_single, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_uopc, // @[issue-slot.scala:73:14] output [31:0] io_out_uop_inst, // @[issue-slot.scala:73:14] output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:73:14] output io_out_uop_is_rvc, // @[issue-slot.scala:73:14] output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_iq_type, // @[issue-slot.scala:73:14] output [9:0] io_out_uop_fu_code, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_ctrl_br_type, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_load, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_std, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_iw_state, // @[issue-slot.scala:73:14] output io_out_uop_iw_p1_poisoned, // 
@[issue-slot.scala:73:14] output io_out_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] output io_out_uop_is_br, // @[issue-slot.scala:73:14] output io_out_uop_is_jalr, // @[issue-slot.scala:73:14] output io_out_uop_is_jal, // @[issue-slot.scala:73:14] output io_out_uop_is_sfb, // @[issue-slot.scala:73:14] output [15:0] io_out_uop_br_mask, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:73:14] output io_out_uop_edge_inst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:73:14] output io_out_uop_taken, // @[issue-slot.scala:73:14] output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:73:14] output [11:0] io_out_uop_csr_addr, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_rob_idx, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ldq_idx, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_stq_idx, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_pdst, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs1, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs2, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs3, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ppred, // @[issue-slot.scala:73:14] output io_out_uop_prs1_busy, // @[issue-slot.scala:73:14] output io_out_uop_prs2_busy, // @[issue-slot.scala:73:14] output io_out_uop_prs3_busy, // @[issue-slot.scala:73:14] output io_out_uop_ppred_busy, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:73:14] output io_out_uop_exception, // @[issue-slot.scala:73:14] output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:73:14] output io_out_uop_bypassable, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:73:14] output io_out_uop_mem_signed, // @[issue-slot.scala:73:14] output io_out_uop_is_fence, // @[issue-slot.scala:73:14] output io_out_uop_is_fencei, // @[issue-slot.scala:73:14] output io_out_uop_is_amo, // @[issue-slot.scala:73:14] output io_out_uop_uses_ldq, // @[issue-slot.scala:73:14] output io_out_uop_uses_stq, // @[issue-slot.scala:73:14] output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] output io_out_uop_is_unique, // @[issue-slot.scala:73:14] output io_out_uop_flush_on_commit, // @[issue-slot.scala:73:14] output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_ldst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:73:14] output io_out_uop_ldst_val, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:73:14] output io_out_uop_frs3_en, // @[issue-slot.scala:73:14] output io_out_uop_fp_val, // @[issue-slot.scala:73:14] output io_out_uop_fp_single, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] output io_out_uop_bp_debug_if, // @[issue-slot.scala:73:14] output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:73:14] 
output [1:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:73:14] output [6:0] io_uop_uopc, // @[issue-slot.scala:73:14] output [31:0] io_uop_inst, // @[issue-slot.scala:73:14] output [31:0] io_uop_debug_inst, // @[issue-slot.scala:73:14] output io_uop_is_rvc, // @[issue-slot.scala:73:14] output [39:0] io_uop_debug_pc, // @[issue-slot.scala:73:14] output [2:0] io_uop_iq_type, // @[issue-slot.scala:73:14] output [9:0] io_uop_fu_code, // @[issue-slot.scala:73:14] output [3:0] io_uop_ctrl_br_type, // @[issue-slot.scala:73:14] output [1:0] io_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] output [4:0] io_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] output io_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_load, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_std, // @[issue-slot.scala:73:14] output [1:0] io_uop_iw_state, // @[issue-slot.scala:73:14] output io_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14] output io_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] output io_uop_is_br, // @[issue-slot.scala:73:14] output io_uop_is_jalr, // @[issue-slot.scala:73:14] output io_uop_is_jal, // @[issue-slot.scala:73:14] output io_uop_is_sfb, // @[issue-slot.scala:73:14] output [15:0] io_uop_br_mask, // @[issue-slot.scala:73:14] output [3:0] io_uop_br_tag, // @[issue-slot.scala:73:14] output [4:0] io_uop_ftq_idx, // @[issue-slot.scala:73:14] output io_uop_edge_inst, // @[issue-slot.scala:73:14] output [5:0] io_uop_pc_lob, // @[issue-slot.scala:73:14] output io_uop_taken, // @[issue-slot.scala:73:14] output [19:0] io_uop_imm_packed, // @[issue-slot.scala:73:14] output [11:0] io_uop_csr_addr, // @[issue-slot.scala:73:14] output [6:0] io_uop_rob_idx, // @[issue-slot.scala:73:14] output [4:0] io_uop_ldq_idx, // @[issue-slot.scala:73:14] output [4:0] io_uop_stq_idx, // @[issue-slot.scala:73:14] output [1:0] io_uop_rxq_idx, // @[issue-slot.scala:73:14] output [6:0] io_uop_pdst, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs1, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs2, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs3, // @[issue-slot.scala:73:14] output [4:0] io_uop_ppred, // @[issue-slot.scala:73:14] output io_uop_prs1_busy, // @[issue-slot.scala:73:14] output io_uop_prs2_busy, // @[issue-slot.scala:73:14] output io_uop_prs3_busy, // @[issue-slot.scala:73:14] output io_uop_ppred_busy, // @[issue-slot.scala:73:14] output [6:0] io_uop_stale_pdst, // @[issue-slot.scala:73:14] output io_uop_exception, // @[issue-slot.scala:73:14] output [63:0] io_uop_exc_cause, // @[issue-slot.scala:73:14] output io_uop_bypassable, // @[issue-slot.scala:73:14] output [4:0] io_uop_mem_cmd, // @[issue-slot.scala:73:14] output [1:0] io_uop_mem_size, // @[issue-slot.scala:73:14] output io_uop_mem_signed, // @[issue-slot.scala:73:14] output io_uop_is_fence, // @[issue-slot.scala:73:14] output io_uop_is_fencei, // @[issue-slot.scala:73:14] output io_uop_is_amo, // @[issue-slot.scala:73:14] output io_uop_uses_ldq, // @[issue-slot.scala:73:14] output io_uop_uses_stq, // @[issue-slot.scala:73:14] output io_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] output io_uop_is_unique, // @[issue-slot.scala:73:14] output io_uop_flush_on_commit, // @[issue-slot.scala:73:14] output io_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_ldst, // 
@[issue-slot.scala:73:14] output [5:0] io_uop_lrs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs2, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs3, // @[issue-slot.scala:73:14] output io_uop_ldst_val, // @[issue-slot.scala:73:14] output [1:0] io_uop_dst_rtype, // @[issue-slot.scala:73:14] output [1:0] io_uop_lrs1_rtype, // @[issue-slot.scala:73:14] output [1:0] io_uop_lrs2_rtype, // @[issue-slot.scala:73:14] output io_uop_frs3_en, // @[issue-slot.scala:73:14] output io_uop_fp_val, // @[issue-slot.scala:73:14] output io_uop_fp_single, // @[issue-slot.scala:73:14] output io_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] output io_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] output io_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] output io_uop_bp_debug_if, // @[issue-slot.scala:73:14] output io_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] output [1:0] io_uop_debug_fsrc, // @[issue-slot.scala:73:14] output [1:0] io_uop_debug_tsrc, // @[issue-slot.scala:73:14] output io_debug_p1, // @[issue-slot.scala:73:14] output io_debug_p2, // @[issue-slot.scala:73:14] output io_debug_p3, // @[issue-slot.scala:73:14] output io_debug_ppred, // @[issue-slot.scala:73:14] output [1:0] io_debug_state // @[issue-slot.scala:73:14] ); wire io_grant_0 = io_grant; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[issue-slot.scala:69:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[issue-slot.scala:69:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:69:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[issue-slot.scala:69:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[issue-slot.scala:69:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7] wire 
io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:69:7] wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:69:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:69:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:69:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:69:7] wire 
io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:69:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[issue-slot.scala:69:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:69:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:69:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:69:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:69:7] wire io_kill_0 = io_kill; // @[issue-slot.scala:69:7] wire io_clear_0 = io_clear; // @[issue-slot.scala:69:7] wire io_ldspec_miss_0 = io_ldspec_miss; // @[issue-slot.scala:69:7] wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_0_bits_pdst_0 = io_wakeup_ports_0_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_0_bits_poisoned_0 = io_wakeup_ports_0_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_1_bits_pdst_0 = io_wakeup_ports_1_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_1_bits_poisoned_0 = io_wakeup_ports_1_bits_poisoned; // @[issue-slot.scala:69:7] wire 
io_wakeup_ports_2_valid_0 = io_wakeup_ports_2_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_2_bits_pdst_0 = io_wakeup_ports_2_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_2_bits_poisoned_0 = io_wakeup_ports_2_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_3_valid_0 = io_wakeup_ports_3_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_3_bits_pdst_0 = io_wakeup_ports_3_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_3_bits_poisoned_0 = io_wakeup_ports_3_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_4_valid_0 = io_wakeup_ports_4_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_4_bits_pdst_0 = io_wakeup_ports_4_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_4_bits_poisoned_0 = io_wakeup_ports_4_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_5_valid_0 = io_wakeup_ports_5_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_5_bits_pdst_0 = io_wakeup_ports_5_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_5_bits_poisoned_0 = io_wakeup_ports_5_bits_poisoned; // @[issue-slot.scala:69:7] wire io_wakeup_ports_6_valid_0 = io_wakeup_ports_6_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_6_bits_pdst_0 = io_wakeup_ports_6_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_6_bits_poisoned_0 = io_wakeup_ports_6_bits_poisoned; // @[issue-slot.scala:69:7] wire io_spec_ld_wakeup_0_valid_0 = io_spec_ld_wakeup_0_valid; // @[issue-slot.scala:69:7] wire [6:0] io_spec_ld_wakeup_0_bits_0 = io_spec_ld_wakeup_0_bits; // @[issue-slot.scala:69:7] wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_uopc_0 = io_in_uop_bits_uopc; // @[issue-slot.scala:69:7] wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:69:7] wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:69:7] wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_iq_type_0 = io_in_uop_bits_iq_type; // @[issue-slot.scala:69:7] wire [9:0] io_in_uop_bits_fu_code_0 = io_in_uop_bits_fu_code; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_ctrl_br_type_0 = io_in_uop_bits_ctrl_br_type; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_ctrl_op1_sel_0 = io_in_uop_bits_ctrl_op1_sel; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_op2_sel_0 = io_in_uop_bits_ctrl_op2_sel; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_imm_sel_0 = io_in_uop_bits_ctrl_imm_sel; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ctrl_op_fcn_0 = io_in_uop_bits_ctrl_op_fcn; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_fcn_dw_0 = io_in_uop_bits_ctrl_fcn_dw; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_csr_cmd_0 = io_in_uop_bits_ctrl_csr_cmd; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_load_0 = io_in_uop_bits_ctrl_is_load; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_sta_0 = io_in_uop_bits_ctrl_is_sta; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_std_0 = io_in_uop_bits_ctrl_is_std; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_iw_state_0 = io_in_uop_bits_iw_state; // @[issue-slot.scala:69:7] wire io_in_uop_bits_iw_p1_poisoned_0 = io_in_uop_bits_iw_p1_poisoned; // @[issue-slot.scala:69:7] wire io_in_uop_bits_iw_p2_poisoned_0 = io_in_uop_bits_iw_p2_poisoned; // 
@[issue-slot.scala:69:7] wire io_in_uop_bits_is_br_0 = io_in_uop_bits_is_br; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_jalr_0 = io_in_uop_bits_is_jalr; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_jal_0 = io_in_uop_bits_is_jal; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:69:7] wire [15:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:69:7] wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:69:7] wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:69:7] wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:69:7] wire [11:0] io_in_uop_bits_csr_addr_0 = io_in_uop_bits_csr_addr; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:69:7] wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:69:7] wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bypassable_0 = io_in_uop_bits_bypassable; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:69:7] wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:69:7] wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:69:7] wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:69:7] wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // 
@[issue-slot.scala:69:7] wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ldst_val_0 = io_in_uop_bits_ldst_val; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:69:7] wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:69:7] wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:69:7] wire io_in_uop_bits_fp_single_0 = io_in_uop_bits_fp_single; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:69:7] wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:69:7] wire slot_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_br = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_taken = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_exception = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19] wire 
slot_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_load = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_std = 1'h0; // @[consts.scala:279:18] wire [4:0] io_pred_wakeup_port_bits = 5'h0; // @[issue-slot.scala:69:7] wire [4:0] slot_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ftq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ldq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_stq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ppred = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18] wire [1:0] slot_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18] wire [3:0] slot_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_uop_br_tag = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_cs_br_type = 4'h0; // @[consts.scala:279:18] wire [1:0] slot_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_ldst = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19] wire [63:0] slot_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_uopc = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_rob_idx = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_pdst = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs1 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs2 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs3 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_stale_pdst = 7'h0; // 
@[consts.scala:269:19] wire [11:0] slot_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19] wire [19:0] slot_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19] wire [15:0] slot_uop_uop_br_mask = 16'h0; // @[consts.scala:269:19] wire [9:0] slot_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19] wire [39:0] slot_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19] wire [31:0] slot_uop_uop_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] slot_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19] wire _io_valid_T; // @[issue-slot.scala:79:24] wire _io_will_be_valid_T_4; // @[issue-slot.scala:262:32] wire _io_request_hp_T; // @[issue-slot.scala:243:31] wire [6:0] next_uopc; // @[issue-slot.scala:82:29] wire [1:0] next_state; // @[issue-slot.scala:81:29] wire [15:0] next_br_mask; // @[util.scala:85:25] wire _io_out_uop_prs1_busy_T; // @[issue-slot.scala:270:28] wire _io_out_uop_prs2_busy_T; // @[issue-slot.scala:271:28] wire _io_out_uop_prs3_busy_T; // @[issue-slot.scala:272:28] wire _io_out_uop_ppred_busy_T; // @[issue-slot.scala:273:28] wire [1:0] next_lrs1_rtype; // @[issue-slot.scala:83:29] wire [1:0] next_lrs2_rtype; // @[issue-slot.scala:84:29] wire [3:0] io_out_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_uopc_0; // @[issue-slot.scala:69:7] wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:69:7] wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_rvc_0; // @[issue-slot.scala:69:7] wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_iq_type_0; // @[issue-slot.scala:69:7] wire [9:0] io_out_uop_fu_code_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_iw_state_0; // @[issue-slot.scala:69:7] wire io_out_uop_iw_p1_poisoned_0; // @[issue-slot.scala:69:7] wire io_out_uop_iw_p2_poisoned_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_br_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_jalr_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_jal_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_sfb_0; // @[issue-slot.scala:69:7] wire [15:0] io_out_uop_br_mask_0; // @[issue-slot.scala:69:7] wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:69:7] wire io_out_uop_edge_inst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:69:7] wire io_out_uop_taken_0; // @[issue-slot.scala:69:7] wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:69:7] wire [11:0] io_out_uop_csr_addr_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs3_0; // 
@[issue-slot.scala:69:7] wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:69:7] wire io_out_uop_exception_0; // @[issue-slot.scala:69:7] wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:69:7] wire io_out_uop_bypassable_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:69:7] wire io_out_uop_mem_signed_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_fence_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_fencei_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_amo_0; // @[issue-slot.scala:69:7] wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:69:7] wire io_out_uop_uses_stq_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_unique_0; // @[issue-slot.scala:69:7] wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:69:7] wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:69:7] wire io_out_uop_ldst_val_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7] wire io_out_uop_frs3_en_0; // @[issue-slot.scala:69:7] wire io_out_uop_fp_val_0; // @[issue-slot.scala:69:7] wire io_out_uop_fp_single_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_uopc_0; // @[issue-slot.scala:69:7] wire [31:0] io_uop_inst_0; // @[issue-slot.scala:69:7] wire [31:0] io_uop_debug_inst_0; // @[issue-slot.scala:69:7] wire io_uop_is_rvc_0; // @[issue-slot.scala:69:7] wire [39:0] io_uop_debug_pc_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_iq_type_0; // @[issue-slot.scala:69:7] wire [9:0] io_uop_fu_code_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_iw_state_0; // @[issue-slot.scala:69:7] wire io_uop_iw_p1_poisoned_0; // @[issue-slot.scala:69:7] wire io_uop_iw_p2_poisoned_0; // @[issue-slot.scala:69:7] wire io_uop_is_br_0; // @[issue-slot.scala:69:7] wire io_uop_is_jalr_0; // 
@[issue-slot.scala:69:7] wire io_uop_is_jal_0; // @[issue-slot.scala:69:7] wire io_uop_is_sfb_0; // @[issue-slot.scala:69:7] wire [15:0] io_uop_br_mask_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_br_tag_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ftq_idx_0; // @[issue-slot.scala:69:7] wire io_uop_edge_inst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_pc_lob_0; // @[issue-slot.scala:69:7] wire io_uop_taken_0; // @[issue-slot.scala:69:7] wire [19:0] io_uop_imm_packed_0; // @[issue-slot.scala:69:7] wire [11:0] io_uop_csr_addr_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_rob_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ldq_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_stq_idx_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_rxq_idx_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_pdst_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs1_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs2_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs3_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ppred_0; // @[issue-slot.scala:69:7] wire io_uop_prs1_busy_0; // @[issue-slot.scala:69:7] wire io_uop_prs2_busy_0; // @[issue-slot.scala:69:7] wire io_uop_prs3_busy_0; // @[issue-slot.scala:69:7] wire io_uop_ppred_busy_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_stale_pdst_0; // @[issue-slot.scala:69:7] wire io_uop_exception_0; // @[issue-slot.scala:69:7] wire [63:0] io_uop_exc_cause_0; // @[issue-slot.scala:69:7] wire io_uop_bypassable_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_mem_cmd_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_mem_size_0; // @[issue-slot.scala:69:7] wire io_uop_mem_signed_0; // @[issue-slot.scala:69:7] wire io_uop_is_fence_0; // @[issue-slot.scala:69:7] wire io_uop_is_fencei_0; // @[issue-slot.scala:69:7] wire io_uop_is_amo_0; // @[issue-slot.scala:69:7] wire io_uop_uses_ldq_0; // @[issue-slot.scala:69:7] wire io_uop_uses_stq_0; // @[issue-slot.scala:69:7] wire io_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7] wire io_uop_is_unique_0; // @[issue-slot.scala:69:7] wire io_uop_flush_on_commit_0; // @[issue-slot.scala:69:7] wire io_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_ldst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs3_0; // @[issue-slot.scala:69:7] wire io_uop_ldst_val_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_dst_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7] wire io_uop_frs3_en_0; // @[issue-slot.scala:69:7] wire io_uop_fp_val_0; // @[issue-slot.scala:69:7] wire io_uop_fp_single_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7] wire io_uop_bp_debug_if_0; // @[issue-slot.scala:69:7] wire io_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_debug_fsrc_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_debug_tsrc_0; // @[issue-slot.scala:69:7] wire io_debug_p1_0; // @[issue-slot.scala:69:7] wire io_debug_p2_0; // @[issue-slot.scala:69:7] wire io_debug_p3_0; // @[issue-slot.scala:69:7] wire io_debug_ppred_0; // @[issue-slot.scala:69:7] wire [1:0] io_debug_state_0; // @[issue-slot.scala:69:7] wire io_valid_0; // @[issue-slot.scala:69:7] wire io_will_be_valid_0; // @[issue-slot.scala:69:7] wire io_request_0; // @[issue-slot.scala:69:7] 
wire io_request_hp_0; // @[issue-slot.scala:69:7] assign io_out_uop_iw_state_0 = next_state; // @[issue-slot.scala:69:7, :81:29] assign io_out_uop_uopc_0 = next_uopc; // @[issue-slot.scala:69:7, :82:29] assign io_out_uop_lrs1_rtype_0 = next_lrs1_rtype; // @[issue-slot.scala:69:7, :83:29] assign io_out_uop_lrs2_rtype_0 = next_lrs2_rtype; // @[issue-slot.scala:69:7, :84:29] reg [1:0] state; // @[issue-slot.scala:86:22] assign io_debug_state_0 = state; // @[issue-slot.scala:69:7, :86:22] reg p1; // @[issue-slot.scala:87:22] assign io_debug_p1_0 = p1; // @[issue-slot.scala:69:7, :87:22] wire next_p1 = p1; // @[issue-slot.scala:87:22, :163:25] reg p2; // @[issue-slot.scala:88:22] assign io_debug_p2_0 = p2; // @[issue-slot.scala:69:7, :88:22] wire next_p2 = p2; // @[issue-slot.scala:88:22, :164:25] reg p3; // @[issue-slot.scala:89:22] assign io_debug_p3_0 = p3; // @[issue-slot.scala:69:7, :89:22] wire next_p3 = p3; // @[issue-slot.scala:89:22, :165:25] reg ppred; // @[issue-slot.scala:90:22] assign io_debug_ppred_0 = ppred; // @[issue-slot.scala:69:7, :90:22] wire next_ppred = ppred; // @[issue-slot.scala:90:22, :166:28] reg p1_poisoned; // @[issue-slot.scala:95:28] assign io_out_uop_iw_p1_poisoned_0 = p1_poisoned; // @[issue-slot.scala:69:7, :95:28] assign io_uop_iw_p1_poisoned_0 = p1_poisoned; // @[issue-slot.scala:69:7, :95:28] reg p2_poisoned; // @[issue-slot.scala:96:28] assign io_out_uop_iw_p2_poisoned_0 = p2_poisoned; // @[issue-slot.scala:69:7, :96:28] assign io_uop_iw_p2_poisoned_0 = p2_poisoned; // @[issue-slot.scala:69:7, :96:28] wire next_p1_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p1_poisoned_0 : p1_poisoned; // @[issue-slot.scala:69:7, :95:28, :99:29] wire next_p2_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p2_poisoned_0 : p2_poisoned; // @[issue-slot.scala:69:7, :96:28, :100:29] reg [6:0] slot_uop_uopc; // @[issue-slot.scala:102:25] reg [31:0] slot_uop_inst; // @[issue-slot.scala:102:25] assign io_out_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25] reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:102:25] assign io_out_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_rvc; // @[issue-slot.scala:102:25] assign io_out_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25] reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_iq_type; // @[issue-slot.scala:102:25] assign io_out_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25] assign io_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25] reg [9:0] slot_uop_fu_code; // @[issue-slot.scala:102:25] assign io_out_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_ctrl_br_type; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_ctrl_op1_sel; // @[issue-slot.scala:102:25] assign 
io_out_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_op2_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_imm_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ctrl_op_fcn; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_load; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_sta; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_std; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_iw_state; // @[issue-slot.scala:102:25] assign io_uop_iw_state_0 = slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_iw_p1_poisoned; // @[issue-slot.scala:102:25] reg slot_uop_iw_p2_poisoned; // @[issue-slot.scala:102:25] reg slot_uop_is_br; // @[issue-slot.scala:102:25] assign io_out_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_jalr; // @[issue-slot.scala:102:25] assign io_out_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_jal; // @[issue-slot.scala:102:25] assign io_out_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_sfb; // @[issue-slot.scala:102:25] assign io_out_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25] reg [15:0] slot_uop_br_mask; // @[issue-slot.scala:102:25] assign io_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:102:25] assign io_out_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25] assign io_uop_br_tag_0 = 
slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ftq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_edge_inst; // @[issue-slot.scala:102:25] assign io_out_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:102:25] assign io_out_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25] assign io_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_taken; // @[issue-slot.scala:102:25] assign io_out_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25] assign io_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25] reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:102:25] assign io_out_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25] assign io_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25] reg [11:0] slot_uop_csr_addr; // @[issue-slot.scala:102:25] assign io_out_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25] assign io_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_rob_idx; // @[issue-slot.scala:102:25] assign io_out_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ldq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_stq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_pdst; // @[issue-slot.scala:102:25] assign io_out_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_prs1; // @[issue-slot.scala:102:25] assign io_out_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_prs2; // @[issue-slot.scala:102:25] assign io_out_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_prs3; // @[issue-slot.scala:102:25] assign io_out_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ppred; // @[issue-slot.scala:102:25] assign io_out_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_prs1_busy; // @[issue-slot.scala:102:25] assign io_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_prs2_busy; // @[issue-slot.scala:102:25] assign 
io_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_prs3_busy; // @[issue-slot.scala:102:25] assign io_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ppred_busy; // @[issue-slot.scala:102:25] assign io_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:102:25] assign io_out_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_exception; // @[issue-slot.scala:102:25] assign io_out_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25] assign io_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25] reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:102:25] assign io_out_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25] assign io_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_bypassable; // @[issue-slot.scala:102:25] assign io_out_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:102:25] assign io_out_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:102:25] assign io_out_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_mem_signed; // @[issue-slot.scala:102:25] assign io_out_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_fence; // @[issue-slot.scala:102:25] assign io_out_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_fencei; // @[issue-slot.scala:102:25] assign io_out_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_amo; // @[issue-slot.scala:102:25] assign io_out_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_uses_ldq; // @[issue-slot.scala:102:25] assign io_out_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25] assign io_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_uses_stq; // @[issue-slot.scala:102:25] assign io_out_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25] assign io_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:102:25] assign io_out_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_unique; // @[issue-slot.scala:102:25] assign io_out_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_unique_0 = slot_uop_is_unique; // 
@[issue-slot.scala:69:7, :102:25] reg slot_uop_flush_on_commit; // @[issue-slot.scala:102:25] assign io_out_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25] assign io_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_ldst; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:102:25] assign io_out_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:102:25] assign io_out_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:102:25] assign io_out_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ldst_val; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:102:25] assign io_out_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25] assign io_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:102:25] reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:102:25] reg slot_uop_frs3_en; // @[issue-slot.scala:102:25] assign io_out_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25] assign io_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_fp_val; // @[issue-slot.scala:102:25] assign io_out_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_fp_single; // @[issue-slot.scala:102:25] assign io_out_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_bp_debug_if; // @[issue-slot.scala:102:25] assign io_out_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25] reg 
slot_uop_bp_xcpt_if; // @[issue-slot.scala:102:25] assign io_out_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_debug_fsrc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_debug_tsrc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25] wire [6:0] next_uop_uopc = io_in_uop_valid_0 ? io_in_uop_bits_uopc_0 : slot_uop_uopc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [31:0] next_uop_inst = io_in_uop_valid_0 ? io_in_uop_bits_inst_0 : slot_uop_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [31:0] next_uop_debug_inst = io_in_uop_valid_0 ? io_in_uop_bits_debug_inst_0 : slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_rvc = io_in_uop_valid_0 ? io_in_uop_bits_is_rvc_0 : slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [39:0] next_uop_debug_pc = io_in_uop_valid_0 ? io_in_uop_bits_debug_pc_0 : slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_iq_type = io_in_uop_valid_0 ? io_in_uop_bits_iq_type_0 : slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [9:0] next_uop_fu_code = io_in_uop_valid_0 ? io_in_uop_bits_fu_code_0 : slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [3:0] next_uop_ctrl_br_type = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_br_type_0 : slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_ctrl_op1_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op1_sel_0 : slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_op2_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op2_sel_0 : slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_imm_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_imm_sel_0 : slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ctrl_op_fcn = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op_fcn_0 : slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_fcn_dw = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_fcn_dw_0 : slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_csr_cmd = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_csr_cmd_0 : slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_load = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_load_0 : slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_sta = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_sta_0 : slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_std = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_std_0 : slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_iw_state = io_in_uop_valid_0 ? io_in_uop_bits_iw_state_0 : slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_iw_p1_poisoned = io_in_uop_valid_0 ? 
io_in_uop_bits_iw_p1_poisoned_0 : slot_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_iw_p2_poisoned = io_in_uop_valid_0 ? io_in_uop_bits_iw_p2_poisoned_0 : slot_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_br = io_in_uop_valid_0 ? io_in_uop_bits_is_br_0 : slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_jalr = io_in_uop_valid_0 ? io_in_uop_bits_is_jalr_0 : slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_jal = io_in_uop_valid_0 ? io_in_uop_bits_is_jal_0 : slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_sfb = io_in_uop_valid_0 ? io_in_uop_bits_is_sfb_0 : slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [15:0] next_uop_br_mask = io_in_uop_valid_0 ? io_in_uop_bits_br_mask_0 : slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [3:0] next_uop_br_tag = io_in_uop_valid_0 ? io_in_uop_bits_br_tag_0 : slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ftq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ftq_idx_0 : slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_edge_inst = io_in_uop_valid_0 ? io_in_uop_bits_edge_inst_0 : slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_pc_lob = io_in_uop_valid_0 ? io_in_uop_bits_pc_lob_0 : slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_taken = io_in_uop_valid_0 ? io_in_uop_bits_taken_0 : slot_uop_taken; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [19:0] next_uop_imm_packed = io_in_uop_valid_0 ? io_in_uop_bits_imm_packed_0 : slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [11:0] next_uop_csr_addr = io_in_uop_valid_0 ? io_in_uop_bits_csr_addr_0 : slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_rob_idx = io_in_uop_valid_0 ? io_in_uop_bits_rob_idx_0 : slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ldq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ldq_idx_0 : slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_stq_idx = io_in_uop_valid_0 ? io_in_uop_bits_stq_idx_0 : slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_rxq_idx = io_in_uop_valid_0 ? io_in_uop_bits_rxq_idx_0 : slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_pdst = io_in_uop_valid_0 ? io_in_uop_bits_pdst_0 : slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_prs1 = io_in_uop_valid_0 ? io_in_uop_bits_prs1_0 : slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_prs2 = io_in_uop_valid_0 ? io_in_uop_bits_prs2_0 : slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_prs3 = io_in_uop_valid_0 ? io_in_uop_bits_prs3_0 : slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ppred = io_in_uop_valid_0 ? io_in_uop_bits_ppred_0 : slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs1_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs1_busy_0 : slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs2_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs2_busy_0 : slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs3_busy = io_in_uop_valid_0 ? 
io_in_uop_bits_prs3_busy_0 : slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ppred_busy = io_in_uop_valid_0 ? io_in_uop_bits_ppred_busy_0 : slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_stale_pdst = io_in_uop_valid_0 ? io_in_uop_bits_stale_pdst_0 : slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_exception = io_in_uop_valid_0 ? io_in_uop_bits_exception_0 : slot_uop_exception; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [63:0] next_uop_exc_cause = io_in_uop_valid_0 ? io_in_uop_bits_exc_cause_0 : slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bypassable = io_in_uop_valid_0 ? io_in_uop_bits_bypassable_0 : slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_mem_cmd = io_in_uop_valid_0 ? io_in_uop_bits_mem_cmd_0 : slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_mem_size = io_in_uop_valid_0 ? io_in_uop_bits_mem_size_0 : slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_mem_signed = io_in_uop_valid_0 ? io_in_uop_bits_mem_signed_0 : slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_fence = io_in_uop_valid_0 ? io_in_uop_bits_is_fence_0 : slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_fencei = io_in_uop_valid_0 ? io_in_uop_bits_is_fencei_0 : slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_amo = io_in_uop_valid_0 ? io_in_uop_bits_is_amo_0 : slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_uses_ldq = io_in_uop_valid_0 ? io_in_uop_bits_uses_ldq_0 : slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_uses_stq = io_in_uop_valid_0 ? io_in_uop_bits_uses_stq_0 : slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_sys_pc2epc = io_in_uop_valid_0 ? io_in_uop_bits_is_sys_pc2epc_0 : slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_unique = io_in_uop_valid_0 ? io_in_uop_bits_is_unique_0 : slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_flush_on_commit = io_in_uop_valid_0 ? io_in_uop_bits_flush_on_commit_0 : slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ldst_is_rs1 = io_in_uop_valid_0 ? io_in_uop_bits_ldst_is_rs1_0 : slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_ldst = io_in_uop_valid_0 ? io_in_uop_bits_ldst_0 : slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs1 = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_0 : slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs2 = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_0 : slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs3 = io_in_uop_valid_0 ? io_in_uop_bits_lrs3_0 : slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ldst_val = io_in_uop_valid_0 ? io_in_uop_bits_ldst_val_0 : slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_dst_rtype = io_in_uop_valid_0 ? io_in_uop_bits_dst_rtype_0 : slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_lrs1_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_rtype_0 : slot_uop_lrs1_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_lrs2_rtype = io_in_uop_valid_0 ? 
io_in_uop_bits_lrs2_rtype_0 : slot_uop_lrs2_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_frs3_en = io_in_uop_valid_0 ? io_in_uop_bits_frs3_en_0 : slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_fp_val = io_in_uop_valid_0 ? io_in_uop_bits_fp_val_0 : slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_fp_single = io_in_uop_valid_0 ? io_in_uop_bits_fp_single_0 : slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_pf_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_pf_if_0 : slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_ae_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ae_if_0 : slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_ma_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ma_if_0 : slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bp_debug_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_debug_if_0 : slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bp_xcpt_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_xcpt_if_0 : slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_debug_fsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_fsrc_0 : slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_debug_tsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_tsrc_0 : slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire _T_11 = state == 2'h2; // @[issue-slot.scala:86:22, :134:25] wire _T_7 = io_grant_0 & state == 2'h1 | io_grant_0 & _T_11 & p1 & p2 & ppred; // @[issue-slot.scala:69:7, :86:22, :87:22, :88:22, :90:22, :133:{26,36,52}, :134:{15,25,40,46,52}] wire _T_12 = io_grant_0 & _T_11; // @[issue-slot.scala:69:7, :134:25, :139:25] wire _T_14 = io_ldspec_miss_0 & (p1_poisoned | p2_poisoned); // @[issue-slot.scala:69:7, :95:28, :96:28, :140:{28,44}] wire _GEN = _T_12 & ~_T_14; // @[issue-slot.scala:126:14, :139:{25,51}, :140:{11,28,62}, :141:18] wire _GEN_0 = io_kill_0 | _T_7; // @[issue-slot.scala:69:7, :102:25, :131:18, :133:52, :134:63, :139:51] wire _GEN_1 = _GEN_0 | ~(_T_12 & ~_T_14 & p1); // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:{11,28,62}, :142:17, :143:23] assign next_uopc = _GEN_1 ? slot_uop_uopc : 7'h3; // @[issue-slot.scala:82:29, :102:25, :131:18, :134:63, :139:51] assign next_lrs1_rtype = _GEN_1 ? slot_uop_lrs1_rtype : 2'h2; // @[issue-slot.scala:83:29, :102:25, :131:18, :134:63, :139:51] wire _GEN_2 = _GEN_0 | ~_GEN | p1; // @[issue-slot.scala:87:22, :102:25, :126:14, :131:18, :134:63, :139:51, :140:62, :141:18, :142:17] assign next_lrs2_rtype = _GEN_2 ? 
slot_uop_lrs2_rtype : 2'h2; // @[issue-slot.scala:84:29, :102:25, :131:18, :134:63, :139:51, :140:62, :142:17] wire _p1_T = ~io_in_uop_bits_prs1_busy_0; // @[issue-slot.scala:69:7, :169:11] wire _p2_T = ~io_in_uop_bits_prs2_busy_0; // @[issue-slot.scala:69:7, :170:11] wire _p3_T = ~io_in_uop_bits_prs3_busy_0; // @[issue-slot.scala:69:7, :171:11] wire _ppred_T = ~io_in_uop_bits_ppred_busy_0; // @[issue-slot.scala:69:7, :172:14] wire _T_22 = io_ldspec_miss_0 & next_p1_poisoned; // @[issue-slot.scala:69:7, :99:29, :175:24] wire _T_27 = io_ldspec_miss_0 & next_p2_poisoned; // @[issue-slot.scala:69:7, :100:29, :179:24] wire _T_85 = io_spec_ld_wakeup_0_valid_0 & io_spec_ld_wakeup_0_bits_0 == next_uop_prs1 & next_uop_lrs1_rtype == 2'h0; // @[issue-slot.scala:69:7, :103:21, :209:38, :210:{33,51}, :211:27] wire _T_93 = io_spec_ld_wakeup_0_valid_0 & io_spec_ld_wakeup_0_bits_0 == next_uop_prs2 & next_uop_lrs2_rtype == 2'h0; // @[issue-slot.scala:69:7, :103:21, :216:38, :217:{33,51}, :218:27]
Generate the Verilog code corresponding to the following Chisel files.
File PE.scala:
// See README.md for license details.

package gemmini

import chisel3._
import chisel3.util._

class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle {
  val dataflow = UInt(1.W) // TODO make this an Enum
  val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)?
  val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats
}

class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module {
  import ev._

  val io = IO(new Bundle {
    val in_a = Input(inputType)
    val in_b = Input(inputType)
    val in_c = Input(cType)
    val out_d = Output(dType)
  })

  io.out_d := io.in_c.mac(io.in_a, io.in_b)
}

// TODO update documentation
/**
  * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh.
  * @param width Data width of operands
  */
class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int)
                   (implicit ev: Arithmetic[T]) extends Module {
  // Debugging variables

  import ev._

  val io = IO(new Bundle {
    val in_a = Input(inputType)
    val in_b = Input(outputType)
    val in_d = Input(outputType)
    val out_a = Output(inputType)
    val out_b = Output(outputType)
    val out_c = Output(outputType)

    val in_control = Input(new PEControl(accType))
    val out_control = Output(new PEControl(accType))

    val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W))
    val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W))

    val in_last = Input(Bool())
    val out_last = Output(Bool())

    val in_valid = Input(Bool())
    val out_valid = Output(Bool())

    val bad_dataflow = Output(Bool())
  })

  val cType = if (df == Dataflow.WS) inputType else accType

  // When creating PEs that support multiple dataflows, the
  // elaboration/synthesis tools often fail to consolidate and de-duplicate
  // MAC units. To force mac circuitry to be re-used, we create a "mac_unit"
  // module here which just performs a single MAC operation
  val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType))

  val a = io.in_a
  val b = io.in_b
  val d = io.in_d
  val c1 = Reg(cType)
  val c2 = Reg(cType)
  val dataflow = io.in_control.dataflow
  val prop = io.in_control.propagate
  val shift = io.in_control.shift
  val id = io.in_id
  val last = io.in_last
  val valid = io.in_valid

  io.out_a := a
  io.out_control.dataflow := dataflow
  io.out_control.propagate := prop
  io.out_control.shift := shift
  io.out_id := id
  io.out_last := last
  io.out_valid := valid

  mac_unit.io.in_a := a

  val last_s = RegEnable(prop, valid)
  val flip = last_s =/= prop
  val shift_offset = Mux(flip, shift, 0.U)

  // Which dataflow are we using?
  val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W)
  val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W)

  // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)?
  val COMPUTE = 0.U(1.W)
  val PROPAGATE = 1.U(1.W)

  io.bad_dataflow := false.B
  when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) {
    when(prop === PROPAGATE) {
      io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType)
      io.out_b := b
      mac_unit.io.in_b := b.asTypeOf(inputType)
      mac_unit.io.in_c := c2
      c2 := mac_unit.io.out_d
      c1 := d.withWidthOf(cType)
    }.otherwise {
      io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType)
      io.out_b := b
      mac_unit.io.in_b := b.asTypeOf(inputType)
      mac_unit.io.in_c := c1
      c1 := mac_unit.io.out_d
      c2 := d.withWidthOf(cType)
    }
  }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) {
    when(prop === PROPAGATE) {
      io.out_c := c1
      mac_unit.io.in_b := c2.asTypeOf(inputType)
      mac_unit.io.in_c := b
      io.out_b := mac_unit.io.out_d
      c1 := d
    }.otherwise {
      io.out_c := c2
      mac_unit.io.in_b := c1.asTypeOf(inputType)
      mac_unit.io.in_c := b
      io.out_b := mac_unit.io.out_d
      c2 := d
    }
  }.otherwise {
    io.bad_dataflow := true.B
    //assert(false.B, "unknown dataflow")
    io.out_c := DontCare
    io.out_b := DontCare
    mac_unit.io.in_b := b.asTypeOf(inputType)
    mac_unit.io.in_c := c2
  }

  when (!valid) {
    c1 := c1
    c2 := c2
    mac_unit.io.in_b := DontCare
    mac_unit.io.in_c := DontCare
  }
}

File Arithmetic.scala:
// A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own:
//     implicit MyTypeArithmetic extends Arithmetic[MyType] { ... }

package gemmini

import chisel3._
import chisel3.util._
import hardfloat._

// Bundles that represent the raw bits of custom datatypes
case class Float(expWidth: Int, sigWidth: Int) extends Bundle {
  val bits = UInt((expWidth + sigWidth).W)

  val bias: Int = (1 << (expWidth-1)) - 1
}

case class DummySInt(w: Int) extends Bundle {
  val bits = UInt(w.W)
  def dontCare: DummySInt = {
    val o = Wire(new DummySInt(w))
    o.bits := 0.U
    o
  }
}

// The Arithmetic typeclass which implements various arithmetic operations on custom datatypes
abstract class Arithmetic[T <: Data] {
  implicit def cast(t: T): ArithmeticOps[T]
}

abstract class ArithmeticOps[T <: Data](self: T) {
  def *(t: T): T
  def mac(m1: T, m2: T): T // Returns (m1 * m2 + self)
  def +(t: T): T
  def -(t: T): T
  def >>(u: UInt): T // This is a rounding shift! Rounds away from 0
  def >(t: T): Bool
  def identity: T
  def withWidthOf(t: T): T
  def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates
  def relu: T
  def zero: T
  def minimum: T

  // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers
  def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
  def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None
  def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None
  def mult_with_reciprocal[U <: Data](reciprocal: U) = self
}

object Arithmetic {
  implicit object UIntArithmetic extends Arithmetic[UInt] {
    override implicit def cast(self: UInt) = new ArithmeticOps(self) {
      override def *(t: UInt) = self * t
      override def mac(m1: UInt, m2: UInt) = m1 * m2 + self
      override def +(t: UInt) = self + t
      override def -(t: UInt) = self - t

      override def >>(u: UInt) = {
        // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
        // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
        val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
        val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
        val ones_digit = self(u)

        val r = point_five & (zeros | ones_digit)

        (self >> u).asUInt + r
      }

      override def >(t: UInt): Bool = self > t

      override def withWidthOf(t: UInt) = self.asTypeOf(t)

      override def clippedToWidthOf(t: UInt) = {
        val sat = ((1 << (t.getWidth-1))-1).U
        Mux(self > sat, sat, self)(t.getWidth-1, 0)
      }

      override def relu: UInt = self

      override def zero: UInt = 0.U
      override def identity: UInt = 1.U
      override def minimum: UInt = 0.U
    }
  }

  implicit object SIntArithmetic extends Arithmetic[SInt] {
    override implicit def cast(self: SInt) = new ArithmeticOps(self) {
      override def *(t: SInt) = self * t
      override def mac(m1: SInt, m2: SInt) = m1 * m2 + self
      override def +(t: SInt) = self + t
      override def -(t: SInt) = self - t

      override def >>(u: UInt) = {
        // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm
        // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here?
        val point_five = Mux(u === 0.U, 0.U, self(u - 1.U))
        val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U
        val ones_digit = self(u)

        val r = (point_five & (zeros | ones_digit)).asBool

        (self >> u).asSInt + Mux(r, 1.S, 0.S)
      }

      override def >(t: SInt): Bool = self > t

      override def withWidthOf(t: SInt) = {
        if (self.getWidth >= t.getWidth)
          self(t.getWidth-1, 0).asSInt
        else {
          val sign_bits = t.getWidth - self.getWidth
          val sign = self(self.getWidth-1)
          Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t)
        }
      }

      override def clippedToWidthOf(t: SInt): SInt = {
        val maxsat = ((1 << (t.getWidth-1))-1).S
        val minsat = (-(1 << (t.getWidth-1))).S
        MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt
      }

      override def relu: SInt = Mux(self >= 0.S, self, 0.S)

      override def zero: SInt = 0.S
      override def identity: SInt = 1.S
      override def minimum: SInt = (-(1 << (self.getWidth-1))).S

      override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating point divider, but we should use an integer divider instead
        val input = Wire(Decoupled(denom_t.cloneType))
        val output = Wire(Decoupled(self.cloneType))

        // We translate our integer to floating-point form so that we can use the hardfloat divider
        val expWidth = log2Up(self.getWidth) + 1
        val sigWidth = self.getWidth

        def sin_to_float(x: SInt) = {
          val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
          in_to_rec_fn.io.signedIn := true.B
          in_to_rec_fn.io.in := x.asUInt
          in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
          in_to_rec_fn.io.out
        }

        def uin_to_float(x: UInt) = {
          val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
          in_to_rec_fn.io.signedIn := false.B
          in_to_rec_fn.io.in := x
          in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
          in_to_rec_fn.io.out
        }

        def float_to_in(x: UInt) = {
          val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
          rec_fn_to_in.io.signedOut := true.B
          rec_fn_to_in.io.in := x
          rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          rec_fn_to_in.io.out.asSInt
        }

        val self_rec = sin_to_float(self)
        val denom_rec = uin_to_float(input.bits)

        // Instantiate the hardloat divider
        val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))

        input.ready := divider.io.inReady
        divider.io.inValid := input.valid
        divider.io.sqrtOp := false.B
        divider.io.a := self_rec
        divider.io.b := denom_rec
        divider.io.roundingMode := consts.round_minMag
        divider.io.detectTininess := consts.tininess_afterRounding

        output.valid := divider.io.outValid_div
        output.bits := float_to_in(divider.io.out)

        assert(!output.valid || output.ready)

        Some((input, output))
      }

      override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = {
        // TODO this uses a floating point divider, but we should use an integer divider instead
        val input = Wire(Decoupled(UInt(0.W)))
        val output = Wire(Decoupled(self.cloneType))

        input.bits := DontCare

        // We translate our integer to floating-point form so that we can use the hardfloat divider
        val expWidth = log2Up(self.getWidth) + 1
        val sigWidth = self.getWidth

        def in_to_float(x: SInt) = {
          val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
          in_to_rec_fn.io.signedIn := true.B
          in_to_rec_fn.io.in := x.asUInt
          in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
          in_to_rec_fn.io.out
        }

        def float_to_in(x: UInt) = {
          val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
          rec_fn_to_in.io.signedOut := true.B
          rec_fn_to_in.io.in := x
          rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag
          rec_fn_to_in.io.out.asSInt
        }

        val self_rec = in_to_float(self)

        // Instantiate the hardloat sqrt
        val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0))

        input.ready := sqrter.io.inReady
        sqrter.io.inValid := input.valid
        sqrter.io.sqrtOp := true.B
        sqrter.io.a := self_rec
        sqrter.io.b := DontCare
        sqrter.io.roundingMode := consts.round_minMag
        sqrter.io.detectTininess := consts.tininess_afterRounding

        output.valid := sqrter.io.outValid_sqrt
        output.bits := float_to_in(sqrter.io.out)

        assert(!output.valid || output.ready)

        Some((input, output))
      }

      override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match {
        case Float(expWidth, sigWidth) =>
          val input = Wire(Decoupled(UInt(0.W)))
          val output = Wire(Decoupled(u.cloneType))

          input.bits := DontCare

          // We translate our integer to floating-point form so that we can use the hardfloat divider
          def in_to_float(x: SInt) = {
            val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
            in_to_rec_fn.io.signedIn := true.B
            in_to_rec_fn.io.in := x.asUInt
            in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
            in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
            in_to_rec_fn.io.out
          }

          val self_rec = in_to_float(self)
          val one_rec = in_to_float(1.S)

          // Instantiate the hardloat divider
          val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options))

          input.ready := divider.io.inReady
          divider.io.inValid := input.valid
          divider.io.sqrtOp := false.B
          divider.io.a := one_rec
          divider.io.b := self_rec
          divider.io.roundingMode := consts.round_near_even
          divider.io.detectTininess := consts.tininess_afterRounding

          output.valid := divider.io.outValid_div
          output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u)

          assert(!output.valid || output.ready)

          Some((input, output))

        case _ => None
      }

      override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match {
        case recip @ Float(expWidth, sigWidth) =>
          def in_to_float(x: SInt) = {
            val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth))
            in_to_rec_fn.io.signedIn := true.B
            in_to_rec_fn.io.in := x.asUInt
            in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
            in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding
            in_to_rec_fn.io.out
          }

          def float_to_in(x: UInt) = {
            val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth))
            rec_fn_to_in.io.signedOut := true.B
            rec_fn_to_in.io.in := x
            rec_fn_to_in.io.roundingMode := consts.round_minMag
            rec_fn_to_in.io.out.asSInt
          }

          val self_rec = in_to_float(self)
          val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits)

          // Instantiate the hardloat divider
          val muladder = Module(new MulRecFN(expWidth, sigWidth))
          muladder.io.roundingMode := consts.round_near_even
          muladder.io.detectTininess := consts.tininess_afterRounding

          muladder.io.a := self_rec
          muladder.io.b := reciprocal_rec

          float_to_in(muladder.io.out)

        case _ => self
      }
    }
  }

  implicit object FloatArithmetic extends Arithmetic[Float] {
    // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array

    override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) {
      override def *(t: Float): Float = {
        val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)

        val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
        t_resizer.io.in := t_rec
        t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        t_resizer.io.detectTininess := consts.tininess_afterRounding
        val t_rec_resized = t_resizer.io.out

        val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))

        muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        muladder.io.detectTininess := consts.tininess_afterRounding

        muladder.io.a := self_rec
        muladder.io.b := t_rec_resized

        val out = Wire(Float(self.expWidth, self.sigWidth))
        out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
        out
      }

      override def mac(m1: Float, m2: Float): Float = {
        // Recode all operands
        val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits)
        val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits)
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)

        // Resize m1 to self's width
        val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth))
        m1_resizer.io.in := m1_rec
        m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        m1_resizer.io.detectTininess := consts.tininess_afterRounding
        val m1_rec_resized = m1_resizer.io.out

        // Resize m2 to self's width
        val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth))
        m2_resizer.io.in := m2_rec
        m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        m2_resizer.io.detectTininess := consts.tininess_afterRounding
        val m2_rec_resized = m2_resizer.io.out

        // Perform multiply-add
        val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))

        muladder.io.op := 0.U
        muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        muladder.io.detectTininess := consts.tininess_afterRounding

        muladder.io.a := m1_rec_resized
        muladder.io.b := m2_rec_resized
        muladder.io.c := self_rec

        // Convert result to standard format
        // TODO remove these intermediate recodings
        val out = Wire(Float(self.expWidth, self.sigWidth))
        out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
        out
      }

      override def +(t: Float): Float = {
        require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code

        // Recode all operands
        val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)

        // Generate 1 as a float
        val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth))
        in_to_rec_fn.io.signedIn := false.B
        in_to_rec_fn.io.in := 1.U
        in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding

        val one_rec = in_to_rec_fn.io.out

        // Resize t
        val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
        t_resizer.io.in := t_rec
        t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        t_resizer.io.detectTininess := consts.tininess_afterRounding
        val t_rec_resized = t_resizer.io.out

        // Perform addition
        val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth))

        muladder.io.op := 0.U
        muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        muladder.io.detectTininess := consts.tininess_afterRounding

        muladder.io.a := t_rec_resized
        muladder.io.b := one_rec
        muladder.io.c := self_rec

        val result = Wire(Float(self.expWidth, self.sigWidth))
        result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
        result
      }

      override def -(t: Float): Float = {
        val t_sgn = t.bits(t.getWidth-1)
        val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t)
        self + neg_t
      }

      override def >>(u: UInt): Float = {
        // Recode self
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)

        // Get 2^(-u) as a recoded float
        val shift_exp = Wire(UInt(self.expWidth.W))
        shift_exp := self.bias.U - u
        val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W))
        val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn)

        assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported")

        // Multiply self and 2^(-u)
        val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth))

        muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        muladder.io.detectTininess := consts.tininess_afterRounding

        muladder.io.a := self_rec
        muladder.io.b := shift_rec

        val result = Wire(Float(self.expWidth, self.sigWidth))
        result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out)
        result
      }

      override def >(t: Float): Bool = {
        // Recode all operands
        val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits)
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)

        // Resize t to self's width
        val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth))
        t_resizer.io.in := t_rec
        t_resizer.io.roundingMode := consts.round_near_even
        t_resizer.io.detectTininess := consts.tininess_afterRounding
        val t_rec_resized = t_resizer.io.out

        val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth))
        comparator.io.a := self_rec
        comparator.io.b := t_rec_resized
        comparator.io.signaling := false.B

        comparator.io.gt
      }

      override def withWidthOf(t: Float): Float = {
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)

        val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
        resizer.io.in := self_rec
        resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        resizer.io.detectTininess := consts.tininess_afterRounding

        val result = Wire(Float(t.expWidth, t.sigWidth))
        result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
        result
      }

      override def clippedToWidthOf(t: Float): Float = {
        // TODO check for overflow. Right now, we just assume that overflow doesn't happen
        val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits)

        val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth))
        resizer.io.in := self_rec
        resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag
        resizer.io.detectTininess := consts.tininess_afterRounding

        val result = Wire(Float(t.expWidth, t.sigWidth))
        result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out)
        result
      }

      override def relu: Float = {
        val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits)

        val result = Wire(Float(self.expWidth, self.sigWidth))
        result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits)
        result
      }

      override def zero: Float = 0.U.asTypeOf(self)
      override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
      override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self)
    }
  }

  implicit object DummySIntArithmetic extends Arithmetic[DummySInt] {
    override implicit def cast(self: DummySInt) = new ArithmeticOps(self) {
      override def *(t: DummySInt) = self.dontCare
      override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare
      override def +(t: DummySInt) = self.dontCare
      override def -(t: DummySInt) = self.dontCare
      override def >>(t: UInt) = self.dontCare
      override def >(t: DummySInt): Bool = false.B
      override def identity = self.dontCare
      override def withWidthOf(t: DummySInt) = self.dontCare
      override def clippedToWidthOf(t: DummySInt) = self.dontCare
      override def relu = self.dontCare
      override def zero = self.dontCare
      override def minimum: DummySInt = self.dontCare
    }
  }
}
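The header of Arithmetic.scala above notes that a new element type only needs its own implicit Arithmetic instance. As a brief illustration (not part of the files above, and assuming the imports and the Arithmetic/ArithmeticOps definitions from that listing), here is a minimal sketch of such an instance for a hypothetical wrapper bundle MyUInt(w) with a single bits field; the type name, the lift helper, and the truncating >> are assumptions made only for this example, while the rounding shift actually used by gemmini is the one shown in UIntArithmetic above.

// Illustrative sketch only: a hypothetical element type and its Arithmetic instance.
case class MyUInt(w: Int) extends Bundle {
  val bits = UInt(w.W)
}

implicit object MyUIntArithmetic extends Arithmetic[MyUInt] {
  override implicit def cast(self: MyUInt) = new ArithmeticOps(self) {
    // Wrap a raw UInt back into the bundle type so every method can delegate to UInt operators
    private def lift(x: UInt): MyUInt = {
      val o = Wire(new MyUInt(self.w))
      o.bits := x
      o
    }
    override def *(t: MyUInt) = lift(self.bits * t.bits)
    override def mac(m1: MyUInt, m2: MyUInt) = lift(m1.bits * m2.bits + self.bits) // m1 * m2 + self
    override def +(t: MyUInt) = lift(self.bits + t.bits)
    override def -(t: MyUInt) = lift(self.bits - t.bits)
    override def >>(u: UInt) = lift(self.bits >> u) // truncating for brevity; see UIntArithmetic for the rounding version
    override def >(t: MyUInt): Bool = self.bits > t.bits
    override def identity = lift(1.U)
    override def withWidthOf(t: MyUInt) = lift(self.bits.asTypeOf(t.bits))
    override def clippedToWidthOf(t: MyUInt) = {
      // Saturate to the largest value representable in t's width, mirroring UIntArithmetic
      val sat = ((1 << (t.w - 1)) - 1).U
      lift(Mux(self.bits > sat, sat, self.bits)(t.w - 1, 0))
    }
    override def relu = self // unsigned values are already non-negative
    override def zero = lift(0.U)
    override def minimum = lift(0.U)
  }
}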
module PE_390( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid // @[PE.scala:35:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7] wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60] wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [7:0] c1; // @[PE.scala:70:15] wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [7:0] c2; // @[PE.scala:71:15] wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25] wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 
32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}] wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16] wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8] c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15] if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8] c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15] if (io_in_valid_0) // @[PE.scala:31:7] last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] always @(posedge) MacUnit_134 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_propagate_0 ? 
_mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_b_0), // @[PE.scala:31:7] .io_out_d (io_out_b_0) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] endmodule
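The io_out_c_point_five / io_out_c_zeros / io_out_c_ones_digit wires in the module above implement a rounding right shift of the shadow accumulators c1 and c2: the shifted value is rounded to nearest, with ties broken toward even. A standalone Chisel sketch of that rounding rule, using illustrative names rather than anything taken from the source:

import chisel3._

object RoundingShift {
  // Returns (x >> shift) rounded to nearest, ties to even.
  //   pointFive : the bit just below the cut point is set (fraction >= 0.5)
  //   zeros     : some lower bit is also set (fraction exceeds 0.5 when pointFive holds)
  //   onesDigit : LSB of the truncated result (breaks ties toward even)
  def apply(x: SInt, shift: UInt): SInt = {
    val pointFive = shift =/= 0.U && (x >> (shift - 1.U))(0)
    val zeros     = shift > 1.U && (x.asUInt & ((1.U << (shift - 1.U)).asUInt - 1.U)) =/= 0.U
    val onesDigit = (x >> shift)(0)
    val roundUp   = pointFive && (zeros || onesDigit)
    (x >> shift) + Mux(roundUp, 1.S, 0.S)
  }
}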
Generate the Verilog code corresponding to the following Chisel files. File MSHR.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import freechips.rocketchip.tilelink._ import TLPermissions._ import TLMessages._ import MetaData._ import chisel3.PrintableHelper import chisel3.experimental.dataview._ class ScheduleRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val a = Valid(new SourceARequest(params)) val b = Valid(new SourceBRequest(params)) val c = Valid(new SourceCRequest(params)) val d = Valid(new SourceDRequest(params)) val e = Valid(new SourceERequest(params)) val x = Valid(new SourceXRequest(params)) val dir = Valid(new DirectoryWrite(params)) val reload = Bool() // get next request via allocate (if any) } class MSHRStatus(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val way = UInt(params.wayBits.W) val blockB = Bool() val nestB = Bool() val blockC = Bool() val nestC = Bool() } class NestedWriteback(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val set = UInt(params.setBits.W) val tag = UInt(params.tagBits.W) val b_toN = Bool() // nested Probes may unhit us val b_toB = Bool() // nested Probes may demote us val b_clr_dirty = Bool() // nested Probes clear dirty val c_set_dirty = Bool() // nested Releases MAY set dirty } sealed trait CacheState { val code = CacheState.index.U CacheState.index = CacheState.index + 1 } object CacheState { var index = 0 } case object S_INVALID extends CacheState case object S_BRANCH extends CacheState case object S_BRANCH_C extends CacheState case object S_TIP extends CacheState case object S_TIP_C extends CacheState case object S_TIP_CD extends CacheState case object S_TIP_D extends CacheState case object S_TRUNK_C extends CacheState case object S_TRUNK_CD extends CacheState class MSHR(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val allocate = Flipped(Valid(new AllocateRequest(params))) // refills MSHR for next cycle val directory = Flipped(Valid(new DirectoryResult(params))) // triggers schedule setup val status = Valid(new MSHRStatus(params)) val schedule = Decoupled(new ScheduleRequest(params)) val sinkc = Flipped(Valid(new SinkCResponse(params))) val sinkd = Flipped(Valid(new SinkDResponse(params))) val sinke = Flipped(Valid(new SinkEResponse(params))) val nestedwb = Flipped(new NestedWriteback(params)) }) val request_valid = RegInit(false.B) val request = Reg(new FullRequest(params)) val meta_valid = RegInit(false.B) val meta = Reg(new DirectoryResult(params)) // Define which states are valid when (meta_valid) { when (meta.state === INVALID) { assert (!meta.clients.orR) assert (!meta.dirty) } when (meta.state === BRANCH) { assert (!meta.dirty) } when 
(meta.state === TRUNK) { assert (meta.clients.orR) assert ((meta.clients & (meta.clients - 1.U)) === 0.U) // at most one } when (meta.state === TIP) { // noop } } // Completed transitions (s_ = scheduled), (w_ = waiting) val s_rprobe = RegInit(true.B) // B val w_rprobeackfirst = RegInit(true.B) val w_rprobeacklast = RegInit(true.B) val s_release = RegInit(true.B) // CW w_rprobeackfirst val w_releaseack = RegInit(true.B) val s_pprobe = RegInit(true.B) // B val s_acquire = RegInit(true.B) // A s_release, s_pprobe [1] val s_flush = RegInit(true.B) // X w_releaseack val w_grantfirst = RegInit(true.B) val w_grantlast = RegInit(true.B) val w_grant = RegInit(true.B) // first | last depending on wormhole val w_pprobeackfirst = RegInit(true.B) val w_pprobeacklast = RegInit(true.B) val w_pprobeack = RegInit(true.B) // first | last depending on wormhole val s_probeack = RegInit(true.B) // C w_pprobeackfirst (mutually exclusive with next two s_*) val s_grantack = RegInit(true.B) // E w_grantfirst ... CAN require both outE&inD to service outD val s_execute = RegInit(true.B) // D w_pprobeack, w_grant val w_grantack = RegInit(true.B) val s_writeback = RegInit(true.B) // W w_* // [1]: We cannot issue outer Acquire while holding blockB (=> outA can stall) // However, inB and outC are higher priority than outB, so s_release and s_pprobe // may be safely issued while blockB. Thus we must NOT try to schedule the // potentially stuck s_acquire with either of them (scheduler is all or none). // Meta-data that we discover underway val sink = Reg(UInt(params.outer.bundle.sinkBits.W)) val gotT = Reg(Bool()) val bad_grant = Reg(Bool()) val probes_done = Reg(UInt(params.clientBits.W)) val probes_toN = Reg(UInt(params.clientBits.W)) val probes_noT = Reg(Bool()) // When a nested transaction completes, update our meta data when (meta_valid && meta.state =/= INVALID && io.nestedwb.set === request.set && io.nestedwb.tag === meta.tag) { when (io.nestedwb.b_clr_dirty) { meta.dirty := false.B } when (io.nestedwb.c_set_dirty) { meta.dirty := true.B } when (io.nestedwb.b_toB) { meta.state := BRANCH } when (io.nestedwb.b_toN) { meta.hit := false.B } } // Scheduler status io.status.valid := request_valid io.status.bits.set := request.set io.status.bits.tag := request.tag io.status.bits.way := meta.way io.status.bits.blockB := !meta_valid || ((!w_releaseack || !w_rprobeacklast || !w_pprobeacklast) && !w_grantfirst) io.status.bits.nestB := meta_valid && w_releaseack && w_rprobeacklast && w_pprobeacklast && !w_grantfirst // The above rules ensure we will block and not nest an outer probe while still doing our // own inner probes. Thus every probe wakes exactly one MSHR. io.status.bits.blockC := !meta_valid io.status.bits.nestC := meta_valid && (!w_rprobeackfirst || !w_pprobeackfirst || !w_grantfirst) // The w_grantfirst in nestC is necessary to deal with: // acquire waiting for grant, inner release gets queued, outer probe -> inner probe -> deadlock // ... 
this is possible because the release+probe can be for same set, but different tag // We can only demand: block, nest, or queue assert (!io.status.bits.nestB || !io.status.bits.blockB) assert (!io.status.bits.nestC || !io.status.bits.blockC) // Scheduler requests val no_wait = w_rprobeacklast && w_releaseack && w_grantlast && w_pprobeacklast && w_grantack io.schedule.bits.a.valid := !s_acquire && s_release && s_pprobe io.schedule.bits.b.valid := !s_rprobe || !s_pprobe io.schedule.bits.c.valid := (!s_release && w_rprobeackfirst) || (!s_probeack && w_pprobeackfirst) io.schedule.bits.d.valid := !s_execute && w_pprobeack && w_grant io.schedule.bits.e.valid := !s_grantack && w_grantfirst io.schedule.bits.x.valid := !s_flush && w_releaseack io.schedule.bits.dir.valid := (!s_release && w_rprobeackfirst) || (!s_writeback && no_wait) io.schedule.bits.reload := no_wait io.schedule.valid := io.schedule.bits.a.valid || io.schedule.bits.b.valid || io.schedule.bits.c.valid || io.schedule.bits.d.valid || io.schedule.bits.e.valid || io.schedule.bits.x.valid || io.schedule.bits.dir.valid // Schedule completions when (io.schedule.ready) { s_rprobe := true.B when (w_rprobeackfirst) { s_release := true.B } s_pprobe := true.B when (s_release && s_pprobe) { s_acquire := true.B } when (w_releaseack) { s_flush := true.B } when (w_pprobeackfirst) { s_probeack := true.B } when (w_grantfirst) { s_grantack := true.B } when (w_pprobeack && w_grant) { s_execute := true.B } when (no_wait) { s_writeback := true.B } // Await the next operation when (no_wait) { request_valid := false.B meta_valid := false.B } } // Resulting meta-data val final_meta_writeback = WireInit(meta) val req_clientBit = params.clientBit(request.source) val req_needT = needT(request.opcode, request.param) val req_acquire = request.opcode === AcquireBlock || request.opcode === AcquirePerm val meta_no_clients = !meta.clients.orR val req_promoteT = req_acquire && Mux(meta.hit, meta_no_clients && meta.state === TIP, gotT) when (request.prio(2) && (!params.firstLevel).B) { // always a hit final_meta_writeback.dirty := meta.dirty || request.opcode(0) final_meta_writeback.state := Mux(request.param =/= TtoT && meta.state === TRUNK, TIP, meta.state) final_meta_writeback.clients := meta.clients & ~Mux(isToN(request.param), req_clientBit, 0.U) final_meta_writeback.hit := true.B // chained requests are hits } .elsewhen (request.control && params.control.B) { // request.prio(0) when (meta.hit) { final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := meta.clients & ~probes_toN } final_meta_writeback.hit := false.B } .otherwise { final_meta_writeback.dirty := (meta.hit && meta.dirty) || !request.opcode(2) final_meta_writeback.state := Mux(req_needT, Mux(req_acquire, TRUNK, TIP), Mux(!meta.hit, Mux(gotT, Mux(req_acquire, TRUNK, TIP), BRANCH), MuxLookup(meta.state, 0.U(2.W))(Seq( INVALID -> BRANCH, BRANCH -> BRANCH, TRUNK -> TIP, TIP -> Mux(meta_no_clients && req_acquire, TRUNK, TIP))))) final_meta_writeback.clients := Mux(meta.hit, meta.clients & ~probes_toN, 0.U) | Mux(req_acquire, req_clientBit, 0.U) final_meta_writeback.tag := request.tag final_meta_writeback.hit := true.B } when (bad_grant) { when (meta.hit) { // upgrade failed (B -> T) assert (!meta_valid || meta.state === BRANCH) final_meta_writeback.hit := true.B final_meta_writeback.dirty := false.B final_meta_writeback.state := BRANCH final_meta_writeback.clients := meta.clients & ~probes_toN } .otherwise { // failed N -> (T or B) 
final_meta_writeback.hit := false.B final_meta_writeback.dirty := false.B final_meta_writeback.state := INVALID final_meta_writeback.clients := 0.U } } val invalid = Wire(new DirectoryEntry(params)) invalid.dirty := false.B invalid.state := INVALID invalid.clients := 0.U invalid.tag := 0.U // Just because a client says BtoT, by the time we process the request he may be N. // Therefore, we must consult our own meta-data state to confirm he owns the line still. val honour_BtoT = meta.hit && (meta.clients & req_clientBit).orR // The client asking us to act is proof they don't have permissions. val excluded_client = Mux(meta.hit && request.prio(0) && skipProbeN(request.opcode, params.cache.hintsSkipProbe), req_clientBit, 0.U) io.schedule.bits.a.bits.tag := request.tag io.schedule.bits.a.bits.set := request.set io.schedule.bits.a.bits.param := Mux(req_needT, Mux(meta.hit, BtoT, NtoT), NtoB) io.schedule.bits.a.bits.block := request.size =/= log2Ceil(params.cache.blockBytes).U || !(request.opcode === PutFullData || request.opcode === AcquirePerm) io.schedule.bits.a.bits.source := 0.U io.schedule.bits.b.bits.param := Mux(!s_rprobe, toN, Mux(request.prio(1), request.param, Mux(req_needT, toN, toB))) io.schedule.bits.b.bits.tag := Mux(!s_rprobe, meta.tag, request.tag) io.schedule.bits.b.bits.set := request.set io.schedule.bits.b.bits.clients := meta.clients & ~excluded_client io.schedule.bits.c.bits.opcode := Mux(meta.dirty, ReleaseData, Release) io.schedule.bits.c.bits.param := Mux(meta.state === BRANCH, BtoN, TtoN) io.schedule.bits.c.bits.source := 0.U io.schedule.bits.c.bits.tag := meta.tag io.schedule.bits.c.bits.set := request.set io.schedule.bits.c.bits.way := meta.way io.schedule.bits.c.bits.dirty := meta.dirty io.schedule.bits.d.bits.viewAsSupertype(chiselTypeOf(request)) := request io.schedule.bits.d.bits.param := Mux(!req_acquire, request.param, MuxLookup(request.param, request.param)(Seq( NtoB -> Mux(req_promoteT, NtoT, NtoB), BtoT -> Mux(honour_BtoT, BtoT, NtoT), NtoT -> NtoT))) io.schedule.bits.d.bits.sink := 0.U io.schedule.bits.d.bits.way := meta.way io.schedule.bits.d.bits.bad := bad_grant io.schedule.bits.e.bits.sink := sink io.schedule.bits.x.bits.fail := false.B io.schedule.bits.dir.bits.set := request.set io.schedule.bits.dir.bits.way := meta.way io.schedule.bits.dir.bits.data := Mux(!s_release, invalid, WireInit(new DirectoryEntry(params), init = final_meta_writeback)) // Coverage of state transitions def cacheState(entry: DirectoryEntry, hit: Bool) = { val out = WireDefault(0.U) val c = entry.clients.orR val d = entry.dirty switch (entry.state) { is (BRANCH) { out := Mux(c, S_BRANCH_C.code, S_BRANCH.code) } is (TRUNK) { out := Mux(d, S_TRUNK_CD.code, S_TRUNK_C.code) } is (TIP) { out := Mux(c, Mux(d, S_TIP_CD.code, S_TIP_C.code), Mux(d, S_TIP_D.code, S_TIP.code)) } is (INVALID) { out := S_INVALID.code } } when (!hit) { out := S_INVALID.code } out } val p = !params.lastLevel // can be probed val c = !params.firstLevel // can be acquired val m = params.inner.client.clients.exists(!_.supports.probe) // can be written (or read) val r = params.outer.manager.managers.exists(!_.alwaysGrantsT) // read-only devices exist val f = params.control // flush control register exists val cfg = (p, c, m, r, f) val b = r || p // can reach branch state (via probe downgrade or read-only device) // The cache must be used for something or we would not be here require(c || m) val evict = cacheState(meta, !meta.hit) val before = cacheState(meta, meta.hit) val after = cacheState(final_meta_writeback, 
true.B) def eviction(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(evict === from.code, s"MSHR_${from}_EVICT", s"State transition from ${from} to evicted ${cfg}") } else { assert(!(evict === from.code), cf"State transition from ${from} to evicted should be impossible ${cfg}") } if (cover && f) { params.ccover(before === from.code, s"MSHR_${from}_FLUSH", s"State transition from ${from} to flushed ${cfg}") } else { assert(!(before === from.code), cf"State transition from ${from} to flushed should be impossible ${cfg}") } } def transition(from: CacheState, to: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(before === from.code && after === to.code, s"MSHR_${from}_${to}", s"State transition from ${from} to ${to} ${cfg}") } else { assert(!(before === from.code && after === to.code), cf"State transition from ${from} to ${to} should be impossible ${cfg}") } } when ((!s_release && w_rprobeackfirst) && io.schedule.ready) { eviction(S_BRANCH, b) // MMIO read to read-only device eviction(S_BRANCH_C, b && c) // you need children to become C eviction(S_TIP, true) // MMIO read || clean release can lead to this state eviction(S_TIP_C, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client eviction(S_TIP_D, true) // MMIO write || dirty release lead here eviction(S_TRUNK_C, c) // acquire for write eviction(S_TRUNK_CD, c) // dirty release then reacquire } when ((!s_writeback && no_wait) && io.schedule.ready) { transition(S_INVALID, S_BRANCH, b && m) // only MMIO can bring us to BRANCH state transition(S_INVALID, S_BRANCH_C, b && c) // C state is only possible if there are inner caches transition(S_INVALID, S_TIP, m) // MMIO read transition(S_INVALID, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_INVALID, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_INVALID, S_TIP_D, m) // MMIO write transition(S_INVALID, S_TRUNK_C, c) // acquire transition(S_INVALID, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_INVALID, b && p) // probe can do this (flushes run as evictions) transition(S_BRANCH, S_BRANCH_C, b && c) // acquire transition(S_BRANCH, S_TIP, b && m) // prefetch write transition(S_BRANCH, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH, S_TIP_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH, S_TIP_D, b && m) // MMIO write transition(S_BRANCH, S_TRUNK_C, b && c) // acquire transition(S_BRANCH, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_BRANCH_C, S_INVALID, b && c && p) transition(S_BRANCH_C, S_BRANCH, b && c) // clean release (optional) transition(S_BRANCH_C, S_TIP, b && c && m) // prefetch write transition(S_BRANCH_C, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_BRANCH_C, S_TIP_D, b && c && m) // MMIO write transition(S_BRANCH_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_BRANCH_C, S_TRUNK_C, b && c) // acquire transition(S_BRANCH_C, S_TRUNK_CD, false) // acquire does not cause dirty immediately transition(S_TIP, S_INVALID, p) transition(S_TIP, S_BRANCH, p) // losing TIP only possible via probe transition(S_TIP, S_BRANCH_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP, S_TIP_D, m) // direct dirty only via MMIO write transition(S_TIP, 
S_TIP_CD, false) // acquire does not make us dirty immediately transition(S_TIP, S_TRUNK_C, c) // acquire transition(S_TIP, S_TRUNK_CD, false) // acquire does not make us dirty immediately transition(S_TIP_C, S_INVALID, c && p) transition(S_TIP_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TIP_C, S_TIP, c) // probed while MMIO read || clean release (optional) transition(S_TIP_C, S_TIP_D, c && m) // direct dirty only via MMIO write transition(S_TIP_C, S_TIP_CD, false) // going dirty means we must shoot down clients transition(S_TIP_C, S_TRUNK_C, c) // acquire transition(S_TIP_C, S_TRUNK_CD, false) // acquire does not make us immediately dirty transition(S_TIP_D, S_INVALID, p) transition(S_TIP_D, S_BRANCH, p) // losing D is only possible via probe transition(S_TIP_D, S_BRANCH_C, p && c) // probed while acquire shared transition(S_TIP_D, S_TIP, p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_D, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_D, S_TIP_CD, false) // we would go S_TRUNK_CD instead transition(S_TIP_D, S_TRUNK_C, p && c) // probed while acquired transition(S_TIP_D, S_TRUNK_CD, c) // acquire transition(S_TIP_CD, S_INVALID, c && p) transition(S_TIP_CD, S_BRANCH, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_BRANCH_C, c && p) // losing D is only possible via probe transition(S_TIP_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TIP_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TIP_CD, S_TIP_D, c) // MMIO write || clean release (optional) transition(S_TIP_CD, S_TRUNK_C, c && p) // probed while acquire transition(S_TIP_CD, S_TRUNK_CD, c) // acquire transition(S_TRUNK_C, S_INVALID, c && p) transition(S_TRUNK_C, S_BRANCH, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_BRANCH_C, c && p) // losing TIP only possible via probe transition(S_TRUNK_C, S_TIP, c) // MMIO read || clean release (optional) transition(S_TRUNK_C, S_TIP_C, c) // bounce shared transition(S_TRUNK_C, S_TIP_D, c) // dirty release transition(S_TRUNK_C, S_TIP_CD, c) // dirty bounce shared transition(S_TRUNK_C, S_TRUNK_CD, c) // dirty bounce transition(S_TRUNK_CD, S_INVALID, c && p) transition(S_TRUNK_CD, S_BRANCH, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_BRANCH_C, c && p) // losing D only possible via probe transition(S_TRUNK_CD, S_TIP, c && p) // probed while MMIO read || outer probe.toT (optional) transition(S_TRUNK_CD, S_TIP_C, false) // we would go S_TRUNK_C instead transition(S_TRUNK_CD, S_TIP_D, c) // dirty release transition(S_TRUNK_CD, S_TIP_CD, c) // bounce shared transition(S_TRUNK_CD, S_TRUNK_C, c && p) // probed while acquire } // Handle response messages val probe_bit = params.clientBit(io.sinkc.bits.source) val last_probe = (probes_done | probe_bit) === (meta.clients & ~excluded_client) val probe_toN = isToN(io.sinkc.bits.param) if (!params.firstLevel) when (io.sinkc.valid) { params.ccover( probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_FULL", "Client downgraded to N when asked only to do B") params.ccover(!probe_toN && io.schedule.bits.b.bits.param === toB, "MSHR_PROBE_HALF", "Client downgraded to B when asked only to do B") // Caution: the probe matches us only in set. // We would never allow an outer probe to nest until both w_[rp]probeack complete, so // it is safe to just unguardedly update the probe FSM. 
probes_done := probes_done | probe_bit probes_toN := probes_toN | Mux(probe_toN, probe_bit, 0.U) probes_noT := probes_noT || io.sinkc.bits.param =/= TtoT w_rprobeackfirst := w_rprobeackfirst || last_probe w_rprobeacklast := w_rprobeacklast || (last_probe && io.sinkc.bits.last) w_pprobeackfirst := w_pprobeackfirst || last_probe w_pprobeacklast := w_pprobeacklast || (last_probe && io.sinkc.bits.last) // Allow wormhole routing from sinkC if the first request beat has offset 0 val set_pprobeack = last_probe && (io.sinkc.bits.last || request.offset === 0.U) w_pprobeack := w_pprobeack || set_pprobeack params.ccover(!set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_SERIAL", "Sequential routing of probe response data") params.ccover( set_pprobeack && w_rprobeackfirst, "MSHR_PROBE_WORMHOLE", "Wormhole routing of probe response data") // However, meta-data updates need to be done more cautiously when (meta.state =/= INVALID && io.sinkc.bits.tag === meta.tag && io.sinkc.bits.data) { meta.dirty := true.B } // !!! } when (io.sinkd.valid) { when (io.sinkd.bits.opcode === Grant || io.sinkd.bits.opcode === GrantData) { sink := io.sinkd.bits.sink w_grantfirst := true.B w_grantlast := io.sinkd.bits.last // Record if we need to prevent taking ownership bad_grant := io.sinkd.bits.denied // Allow wormhole routing for requests whose first beat has offset 0 w_grant := request.offset === 0.U || io.sinkd.bits.last params.ccover(io.sinkd.bits.opcode === GrantData && request.offset === 0.U, "MSHR_GRANT_WORMHOLE", "Wormhole routing of grant response data") params.ccover(io.sinkd.bits.opcode === GrantData && request.offset =/= 0.U, "MSHR_GRANT_SERIAL", "Sequential routing of grant response data") gotT := io.sinkd.bits.param === toT } .elsewhen (io.sinkd.bits.opcode === ReleaseAck) { w_releaseack := true.B } } when (io.sinke.valid) { w_grantack := true.B } // Bootstrap new requests val allocate_as_full = WireInit(new FullRequest(params), init = io.allocate.bits) val new_meta = Mux(io.allocate.valid && io.allocate.bits.repeat, final_meta_writeback, io.directory.bits) val new_request = Mux(io.allocate.valid, allocate_as_full, request) val new_needT = needT(new_request.opcode, new_request.param) val new_clientBit = params.clientBit(new_request.source) val new_skipProbe = Mux(skipProbeN(new_request.opcode, params.cache.hintsSkipProbe), new_clientBit, 0.U) val prior = cacheState(final_meta_writeback, true.B) def bypass(from: CacheState, cover: Boolean)(implicit sourceInfo: SourceInfo) { if (cover) { params.ccover(prior === from.code, s"MSHR_${from}_BYPASS", s"State bypass transition from ${from} ${cfg}") } else { assert(!(prior === from.code), cf"State bypass from ${from} should be impossible ${cfg}") } } when (io.allocate.valid && io.allocate.bits.repeat) { bypass(S_INVALID, f || p) // Can lose permissions (probe/flush) bypass(S_BRANCH, b) // MMIO read to read-only device bypass(S_BRANCH_C, b && c) // you need children to become C bypass(S_TIP, true) // MMIO read || clean release can lead to this state bypass(S_TIP_C, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_CD, c) // needs two clients || client + mmio || downgrading client bypass(S_TIP_D, true) // MMIO write || dirty release lead here bypass(S_TRUNK_C, c) // acquire for write bypass(S_TRUNK_CD, c) // dirty release then reacquire } when (io.allocate.valid) { assert (!request_valid || (no_wait && io.schedule.fire)) request_valid := true.B request := io.allocate.bits } // Create execution plan when (io.directory.valid || (io.allocate.valid && 
io.allocate.bits.repeat)) { meta_valid := true.B meta := new_meta probes_done := 0.U probes_toN := 0.U probes_noT := false.B gotT := false.B bad_grant := false.B // These should already be either true or turning true // We clear them here explicitly to simplify the mux tree s_rprobe := true.B w_rprobeackfirst := true.B w_rprobeacklast := true.B s_release := true.B w_releaseack := true.B s_pprobe := true.B s_acquire := true.B s_flush := true.B w_grantfirst := true.B w_grantlast := true.B w_grant := true.B w_pprobeackfirst := true.B w_pprobeacklast := true.B w_pprobeack := true.B s_probeack := true.B s_grantack := true.B s_execute := true.B w_grantack := true.B s_writeback := true.B // For C channel requests (ie: Release[Data]) when (new_request.prio(2) && (!params.firstLevel).B) { s_execute := false.B // Do we need to go dirty? when (new_request.opcode(0) && !new_meta.dirty) { s_writeback := false.B } // Does our state change? when (isToB(new_request.param) && new_meta.state === TRUNK) { s_writeback := false.B } // Do our clients change? when (isToN(new_request.param) && (new_meta.clients & new_clientBit) =/= 0.U) { s_writeback := false.B } assert (new_meta.hit) } // For X channel requests (ie: flush) .elsewhen (new_request.control && params.control.B) { // new_request.prio(0) s_flush := false.B // Do we need to actually do something? when (new_meta.hit) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B && (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } } // For A channel requests .otherwise { // new_request.prio(0) && !new_request.control s_execute := false.B // Do we need an eviction? when (!new_meta.hit && new_meta.state =/= INVALID) { s_release := false.B w_releaseack := false.B // Do we need to shoot-down inner caches? when ((!params.firstLevel).B & (new_meta.clients =/= 0.U)) { s_rprobe := false.B w_rprobeackfirst := false.B w_rprobeacklast := false.B } } // Do we need an acquire? when (!new_meta.hit || (new_meta.state === BRANCH && new_needT)) { s_acquire := false.B w_grantfirst := false.B w_grantlast := false.B w_grant := false.B s_grantack := false.B s_writeback := false.B } // Do we need a probe? when ((!params.firstLevel).B && (new_meta.hit && (new_needT || new_meta.state === TRUNK) && (new_meta.clients & ~new_skipProbe) =/= 0.U)) { s_pprobe := false.B w_pprobeackfirst := false.B w_pprobeacklast := false.B w_pprobeack := false.B s_writeback := false.B } // Do we need a grantack? when (new_request.opcode === AcquireBlock || new_request.opcode === AcquirePerm) { w_grantack := false.B s_writeback := false.B } // Becomes dirty? when (!new_request.opcode(2) && new_meta.hit && !new_meta.dirty) { s_writeback := false.B } } } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = { // Acquire(toB) and Get => is N, so no probe // Acquire(*toT) => is N or B, but need T, so no probe // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client // Put* => is N or B, so probe IS needed opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm || opcode === TLMessages.Get || (opcode === TLMessages.Hint && hintsSkipProbe.B) } def isToN(param: UInt): Bool = { param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN } def isToB(param: UInt): Bool = { param === TLPermissions.TtoB || param === TLPermissions.BtoB } } object InclusiveCacheParameters { val lfsrBits = 10 val L2ControlAddress = 0x2010000 val L2ControlSize = 0x1000 def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = { // We need 2-3 normal MSHRs to cover the Directory latency // To fully exploit memory bandwidth-delay-product, we need memCyles/blockBeats MSHRs max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats) } def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = // We need a dedicated MSHR for B+C each 2 + out_mshrs(cache, micro) } class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
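An illustrative sketch (hypothetical, assuming it sits in the same sifive.blocks.inclusivecache package so MetaData is in scope): the predicates above can be used on their own to classify an inner request, e.g. whether it forces the cache to hold the line as TRUNK/TIP and whether the requester is shedding permissions.

import chisel3._

class RequestClassifier extends Module {
  val io = IO(new Bundle {
    val opcode = Input(UInt(3.W)) // TileLink A/C channel opcode
    val param  = Input(UInt(3.W)) // TileLink grow/shrink permissions parameter
    val needsT = Output(Bool())   // request requires TRUNK/TIP (write intent)
    val toN    = Output(Bool())   // requester drops to Nothing
    val toB    = Output(Bool())   // requester drops to Branch
  })
  io.needsT := MetaData.needT(io.opcode, io.param)
  io.toN    := MetaData.isToN(io.param)
  io.toB    := MetaData.isToB(io.param)
}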
module MSHR_5( // @[MSHR.scala:84:7] input clock, // @[MSHR.scala:84:7] input reset, // @[MSHR.scala:84:7] input io_allocate_valid, // @[MSHR.scala:86:14] input io_allocate_bits_prio_0, // @[MSHR.scala:86:14] input io_allocate_bits_prio_1, // @[MSHR.scala:86:14] input io_allocate_bits_prio_2, // @[MSHR.scala:86:14] input io_allocate_bits_control, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_param, // @[MSHR.scala:86:14] input [2:0] io_allocate_bits_size, // @[MSHR.scala:86:14] input [6:0] io_allocate_bits_source, // @[MSHR.scala:86:14] input [12:0] io_allocate_bits_tag, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_offset, // @[MSHR.scala:86:14] input [5:0] io_allocate_bits_put, // @[MSHR.scala:86:14] input [9:0] io_allocate_bits_set, // @[MSHR.scala:86:14] input io_allocate_bits_repeat, // @[MSHR.scala:86:14] input io_directory_valid, // @[MSHR.scala:86:14] input io_directory_bits_dirty, // @[MSHR.scala:86:14] input [1:0] io_directory_bits_state, // @[MSHR.scala:86:14] input io_directory_bits_clients, // @[MSHR.scala:86:14] input [12:0] io_directory_bits_tag, // @[MSHR.scala:86:14] input io_directory_bits_hit, // @[MSHR.scala:86:14] input [2:0] io_directory_bits_way, // @[MSHR.scala:86:14] output io_status_valid, // @[MSHR.scala:86:14] output [9:0] io_status_bits_set, // @[MSHR.scala:86:14] output [12:0] io_status_bits_tag, // @[MSHR.scala:86:14] output [2:0] io_status_bits_way, // @[MSHR.scala:86:14] output io_status_bits_blockB, // @[MSHR.scala:86:14] output io_status_bits_nestB, // @[MSHR.scala:86:14] output io_status_bits_blockC, // @[MSHR.scala:86:14] output io_status_bits_nestC, // @[MSHR.scala:86:14] input io_schedule_ready, // @[MSHR.scala:86:14] output io_schedule_valid, // @[MSHR.scala:86:14] output io_schedule_bits_a_valid, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_a_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_a_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_a_bits_param, // @[MSHR.scala:86:14] output io_schedule_bits_a_bits_block, // @[MSHR.scala:86:14] output io_schedule_bits_b_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_b_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_b_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_b_bits_set, // @[MSHR.scala:86:14] output io_schedule_bits_b_bits_clients, // @[MSHR.scala:86:14] output io_schedule_bits_c_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_param, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_c_bits_tag, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_c_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_c_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_c_bits_dirty, // @[MSHR.scala:86:14] output io_schedule_bits_d_valid, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_0, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_1, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_prio_2, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_control, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_opcode, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_param, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_size, // @[MSHR.scala:86:14] output [6:0] io_schedule_bits_d_bits_source, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_d_bits_tag, // @[MSHR.scala:86:14] output [5:0] 
io_schedule_bits_d_bits_offset, // @[MSHR.scala:86:14] output [5:0] io_schedule_bits_d_bits_put, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_d_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_d_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_d_bits_bad, // @[MSHR.scala:86:14] output io_schedule_bits_e_valid, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_e_bits_sink, // @[MSHR.scala:86:14] output io_schedule_bits_x_valid, // @[MSHR.scala:86:14] output io_schedule_bits_dir_valid, // @[MSHR.scala:86:14] output [9:0] io_schedule_bits_dir_bits_set, // @[MSHR.scala:86:14] output [2:0] io_schedule_bits_dir_bits_way, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_dirty, // @[MSHR.scala:86:14] output [1:0] io_schedule_bits_dir_bits_data_state, // @[MSHR.scala:86:14] output io_schedule_bits_dir_bits_data_clients, // @[MSHR.scala:86:14] output [12:0] io_schedule_bits_dir_bits_data_tag, // @[MSHR.scala:86:14] output io_schedule_bits_reload, // @[MSHR.scala:86:14] input io_sinkc_valid, // @[MSHR.scala:86:14] input io_sinkc_bits_last, // @[MSHR.scala:86:14] input [9:0] io_sinkc_bits_set, // @[MSHR.scala:86:14] input [12:0] io_sinkc_bits_tag, // @[MSHR.scala:86:14] input [6:0] io_sinkc_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkc_bits_param, // @[MSHR.scala:86:14] input io_sinkc_bits_data, // @[MSHR.scala:86:14] input io_sinkd_valid, // @[MSHR.scala:86:14] input io_sinkd_bits_last, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_opcode, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_param, // @[MSHR.scala:86:14] input [3:0] io_sinkd_bits_source, // @[MSHR.scala:86:14] input [2:0] io_sinkd_bits_sink, // @[MSHR.scala:86:14] input io_sinkd_bits_denied, // @[MSHR.scala:86:14] input io_sinke_valid, // @[MSHR.scala:86:14] input [3:0] io_sinke_bits_sink, // @[MSHR.scala:86:14] input [9:0] io_nestedwb_set, // @[MSHR.scala:86:14] input [12:0] io_nestedwb_tag, // @[MSHR.scala:86:14] input io_nestedwb_b_toN, // @[MSHR.scala:86:14] input io_nestedwb_b_toB, // @[MSHR.scala:86:14] input io_nestedwb_b_clr_dirty, // @[MSHR.scala:86:14] input io_nestedwb_c_set_dirty // @[MSHR.scala:86:14] ); wire [12:0] final_meta_writeback_tag; // @[MSHR.scala:215:38] wire final_meta_writeback_clients; // @[MSHR.scala:215:38] wire [1:0] final_meta_writeback_state; // @[MSHR.scala:215:38] wire final_meta_writeback_dirty; // @[MSHR.scala:215:38] wire io_allocate_valid_0 = io_allocate_valid; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_0_0 = io_allocate_bits_prio_0; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_1_0 = io_allocate_bits_prio_1; // @[MSHR.scala:84:7] wire io_allocate_bits_prio_2_0 = io_allocate_bits_prio_2; // @[MSHR.scala:84:7] wire io_allocate_bits_control_0 = io_allocate_bits_control; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_opcode_0 = io_allocate_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_param_0 = io_allocate_bits_param; // @[MSHR.scala:84:7] wire [2:0] io_allocate_bits_size_0 = io_allocate_bits_size; // @[MSHR.scala:84:7] wire [6:0] io_allocate_bits_source_0 = io_allocate_bits_source; // @[MSHR.scala:84:7] wire [12:0] io_allocate_bits_tag_0 = io_allocate_bits_tag; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_offset_0 = io_allocate_bits_offset; // @[MSHR.scala:84:7] wire [5:0] io_allocate_bits_put_0 = io_allocate_bits_put; // @[MSHR.scala:84:7] wire [9:0] io_allocate_bits_set_0 = io_allocate_bits_set; // @[MSHR.scala:84:7] wire io_allocate_bits_repeat_0 = io_allocate_bits_repeat; // @[MSHR.scala:84:7] wire 
io_directory_valid_0 = io_directory_valid; // @[MSHR.scala:84:7] wire io_directory_bits_dirty_0 = io_directory_bits_dirty; // @[MSHR.scala:84:7] wire [1:0] io_directory_bits_state_0 = io_directory_bits_state; // @[MSHR.scala:84:7] wire io_directory_bits_clients_0 = io_directory_bits_clients; // @[MSHR.scala:84:7] wire [12:0] io_directory_bits_tag_0 = io_directory_bits_tag; // @[MSHR.scala:84:7] wire io_directory_bits_hit_0 = io_directory_bits_hit; // @[MSHR.scala:84:7] wire [2:0] io_directory_bits_way_0 = io_directory_bits_way; // @[MSHR.scala:84:7] wire io_schedule_ready_0 = io_schedule_ready; // @[MSHR.scala:84:7] wire io_sinkc_valid_0 = io_sinkc_valid; // @[MSHR.scala:84:7] wire io_sinkc_bits_last_0 = io_sinkc_bits_last; // @[MSHR.scala:84:7] wire [9:0] io_sinkc_bits_set_0 = io_sinkc_bits_set; // @[MSHR.scala:84:7] wire [12:0] io_sinkc_bits_tag_0 = io_sinkc_bits_tag; // @[MSHR.scala:84:7] wire [6:0] io_sinkc_bits_source_0 = io_sinkc_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkc_bits_param_0 = io_sinkc_bits_param; // @[MSHR.scala:84:7] wire io_sinkc_bits_data_0 = io_sinkc_bits_data; // @[MSHR.scala:84:7] wire io_sinkd_valid_0 = io_sinkd_valid; // @[MSHR.scala:84:7] wire io_sinkd_bits_last_0 = io_sinkd_bits_last; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_opcode_0 = io_sinkd_bits_opcode; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_param_0 = io_sinkd_bits_param; // @[MSHR.scala:84:7] wire [3:0] io_sinkd_bits_source_0 = io_sinkd_bits_source; // @[MSHR.scala:84:7] wire [2:0] io_sinkd_bits_sink_0 = io_sinkd_bits_sink; // @[MSHR.scala:84:7] wire io_sinkd_bits_denied_0 = io_sinkd_bits_denied; // @[MSHR.scala:84:7] wire io_sinke_valid_0 = io_sinke_valid; // @[MSHR.scala:84:7] wire [3:0] io_sinke_bits_sink_0 = io_sinke_bits_sink; // @[MSHR.scala:84:7] wire [9:0] io_nestedwb_set_0 = io_nestedwb_set; // @[MSHR.scala:84:7] wire [12:0] io_nestedwb_tag_0 = io_nestedwb_tag; // @[MSHR.scala:84:7] wire io_nestedwb_b_toN_0 = io_nestedwb_b_toN; // @[MSHR.scala:84:7] wire io_nestedwb_b_toB_0 = io_nestedwb_b_toB; // @[MSHR.scala:84:7] wire io_nestedwb_b_clr_dirty_0 = io_nestedwb_b_clr_dirty; // @[MSHR.scala:84:7] wire io_nestedwb_c_set_dirty_0 = io_nestedwb_c_set_dirty; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_a_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_c_bits_source = 4'h0; // @[MSHR.scala:84:7] wire [3:0] io_schedule_bits_d_bits_sink = 4'h0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_bits_fail = 1'h0; // @[MSHR.scala:84:7] wire _io_schedule_bits_c_valid_T_2 = 1'h0; // @[MSHR.scala:186:68] wire _io_schedule_bits_c_valid_T_3 = 1'h0; // @[MSHR.scala:186:80] wire invalid_dirty = 1'h0; // @[MSHR.scala:268:21] wire invalid_clients = 1'h0; // @[MSHR.scala:268:21] wire _excluded_client_T_7 = 1'h0; // @[Parameters.scala:279:137] wire _after_T_4 = 1'h0; // @[MSHR.scala:323:11] wire _new_skipProbe_T_6 = 1'h0; // @[Parameters.scala:279:137] wire _prior_T_4 = 1'h0; // @[MSHR.scala:323:11] wire [12:0] invalid_tag = 13'h0; // @[MSHR.scala:268:21] wire [1:0] invalid_state = 2'h0; // @[MSHR.scala:268:21] wire [1:0] _final_meta_writeback_state_T_11 = 2'h1; // @[MSHR.scala:240:70] wire allocate_as_full_prio_0 = io_allocate_bits_prio_0_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_1 = io_allocate_bits_prio_1_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_prio_2 = io_allocate_bits_prio_2_0; // @[MSHR.scala:84:7, :504:34] wire allocate_as_full_control = io_allocate_bits_control_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_opcode = 
io_allocate_bits_opcode_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_param = io_allocate_bits_param_0; // @[MSHR.scala:84:7, :504:34] wire [2:0] allocate_as_full_size = io_allocate_bits_size_0; // @[MSHR.scala:84:7, :504:34] wire [6:0] allocate_as_full_source = io_allocate_bits_source_0; // @[MSHR.scala:84:7, :504:34] wire [12:0] allocate_as_full_tag = io_allocate_bits_tag_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_offset = io_allocate_bits_offset_0; // @[MSHR.scala:84:7, :504:34] wire [5:0] allocate_as_full_put = io_allocate_bits_put_0; // @[MSHR.scala:84:7, :504:34] wire [9:0] allocate_as_full_set = io_allocate_bits_set_0; // @[MSHR.scala:84:7, :504:34] wire _io_status_bits_blockB_T_8; // @[MSHR.scala:168:40] wire _io_status_bits_nestB_T_4; // @[MSHR.scala:169:93] wire _io_status_bits_blockC_T; // @[MSHR.scala:172:28] wire _io_status_bits_nestC_T_5; // @[MSHR.scala:173:39] wire _io_schedule_valid_T_5; // @[MSHR.scala:193:105] wire _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:184:55] wire _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:283:91] wire _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:185:41] wire [2:0] _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:286:41] wire [12:0] _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:287:41] wire _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:289:51] wire _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:186:64] wire [2:0] _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:290:41] wire [2:0] _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:291:41] wire _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:187:57] wire [2:0] _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:298:41] wire _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:188:43] wire _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:189:40] wire _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:190:66] wire _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:310:41] wire [1:0] _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:310:41] wire _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:310:41] wire [12:0] _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:310:41] wire no_wait; // @[MSHR.scala:183:83] wire [9:0] io_status_bits_set_0; // @[MSHR.scala:84:7] wire [12:0] io_status_bits_tag_0; // @[MSHR.scala:84:7] wire [2:0] io_status_bits_way_0; // @[MSHR.scala:84:7] wire io_status_bits_blockB_0; // @[MSHR.scala:84:7] wire io_status_bits_nestB_0; // @[MSHR.scala:84:7] wire io_status_bits_blockC_0; // @[MSHR.scala:84:7] wire io_status_bits_nestC_0; // @[MSHR.scala:84:7] wire io_status_valid_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_a_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_a_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_a_bits_param_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_bits_block_0; // @[MSHR.scala:84:7] wire io_schedule_bits_a_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_b_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_b_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_b_bits_set_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_bits_clients_0; // @[MSHR.scala:84:7] wire io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_c_bits_param_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_c_bits_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_c_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] 
io_schedule_bits_c_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_bits_dirty_0; // @[MSHR.scala:84:7] wire io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_0_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_1_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_prio_2_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_control_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_opcode_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_param_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_size_0; // @[MSHR.scala:84:7] wire [6:0] io_schedule_bits_d_bits_source_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_d_bits_tag_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_offset_0; // @[MSHR.scala:84:7] wire [5:0] io_schedule_bits_d_bits_put_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_d_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_d_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_bits_bad_0; // @[MSHR.scala:84:7] wire io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_e_bits_sink_0; // @[MSHR.scala:84:7] wire io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_dirty_0; // @[MSHR.scala:84:7] wire [1:0] io_schedule_bits_dir_bits_data_state_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_bits_data_clients_0; // @[MSHR.scala:84:7] wire [12:0] io_schedule_bits_dir_bits_data_tag_0; // @[MSHR.scala:84:7] wire [9:0] io_schedule_bits_dir_bits_set_0; // @[MSHR.scala:84:7] wire [2:0] io_schedule_bits_dir_bits_way_0; // @[MSHR.scala:84:7] wire io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7] wire io_schedule_bits_reload_0; // @[MSHR.scala:84:7] wire io_schedule_valid_0; // @[MSHR.scala:84:7] reg request_valid; // @[MSHR.scala:97:30] assign io_status_valid_0 = request_valid; // @[MSHR.scala:84:7, :97:30] reg request_prio_0; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_0_0 = request_prio_0; // @[MSHR.scala:84:7, :98:20] reg request_prio_1; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_1_0 = request_prio_1; // @[MSHR.scala:84:7, :98:20] reg request_prio_2; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_prio_2_0 = request_prio_2; // @[MSHR.scala:84:7, :98:20] reg request_control; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_control_0 = request_control; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_opcode; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_opcode_0 = request_opcode; // @[MSHR.scala:84:7, :98:20] reg [2:0] request_param; // @[MSHR.scala:98:20] reg [2:0] request_size; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_size_0 = request_size; // @[MSHR.scala:84:7, :98:20] reg [6:0] request_source; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_source_0 = request_source; // @[MSHR.scala:84:7, :98:20] reg [12:0] request_tag; // @[MSHR.scala:98:20] assign io_status_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_tag_0 = request_tag; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_offset; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_offset_0 = request_offset; // @[MSHR.scala:84:7, :98:20] reg [5:0] request_put; // @[MSHR.scala:98:20] assign io_schedule_bits_d_bits_put_0 = request_put; // @[MSHR.scala:84:7, :98:20] reg [9:0] request_set; // @[MSHR.scala:98:20] 
assign io_status_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_a_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_b_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_c_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_d_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] assign io_schedule_bits_dir_bits_set_0 = request_set; // @[MSHR.scala:84:7, :98:20] reg meta_valid; // @[MSHR.scala:99:27] reg meta_dirty; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_dirty_0 = meta_dirty; // @[MSHR.scala:84:7, :100:17] reg [1:0] meta_state; // @[MSHR.scala:100:17] reg meta_clients; // @[MSHR.scala:100:17] wire _meta_no_clients_T = meta_clients; // @[MSHR.scala:100:17, :220:39] wire evict_c = meta_clients; // @[MSHR.scala:100:17, :315:27] wire before_c = meta_clients; // @[MSHR.scala:100:17, :315:27] reg [12:0] meta_tag; // @[MSHR.scala:100:17] assign io_schedule_bits_c_bits_tag_0 = meta_tag; // @[MSHR.scala:84:7, :100:17] reg meta_hit; // @[MSHR.scala:100:17] reg [2:0] meta_way; // @[MSHR.scala:100:17] assign io_status_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_c_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_d_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] assign io_schedule_bits_dir_bits_way_0 = meta_way; // @[MSHR.scala:84:7, :100:17] wire [2:0] final_meta_writeback_way = meta_way; // @[MSHR.scala:100:17, :215:38] reg s_rprobe; // @[MSHR.scala:121:33] reg w_rprobeackfirst; // @[MSHR.scala:122:33] reg w_rprobeacklast; // @[MSHR.scala:123:33] reg s_release; // @[MSHR.scala:124:33] reg w_releaseack; // @[MSHR.scala:125:33] reg s_pprobe; // @[MSHR.scala:126:33] reg s_acquire; // @[MSHR.scala:127:33] reg s_flush; // @[MSHR.scala:128:33] reg w_grantfirst; // @[MSHR.scala:129:33] reg w_grantlast; // @[MSHR.scala:130:33] reg w_grant; // @[MSHR.scala:131:33] reg w_pprobeackfirst; // @[MSHR.scala:132:33] reg w_pprobeacklast; // @[MSHR.scala:133:33] reg w_pprobeack; // @[MSHR.scala:134:33] reg s_grantack; // @[MSHR.scala:136:33] reg s_execute; // @[MSHR.scala:137:33] reg w_grantack; // @[MSHR.scala:138:33] reg s_writeback; // @[MSHR.scala:139:33] reg [2:0] sink; // @[MSHR.scala:147:17] assign io_schedule_bits_e_bits_sink_0 = sink; // @[MSHR.scala:84:7, :147:17] reg gotT; // @[MSHR.scala:148:17] reg bad_grant; // @[MSHR.scala:149:22] assign io_schedule_bits_d_bits_bad_0 = bad_grant; // @[MSHR.scala:84:7, :149:22] reg probes_done; // @[MSHR.scala:150:24] reg probes_toN; // @[MSHR.scala:151:23] reg probes_noT; // @[MSHR.scala:152:23] wire _io_status_bits_blockB_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28] wire _io_status_bits_blockB_T_1 = ~w_releaseack; // @[MSHR.scala:125:33, :168:45] wire _io_status_bits_blockB_T_2 = ~w_rprobeacklast; // @[MSHR.scala:123:33, :168:62] wire _io_status_bits_blockB_T_3 = _io_status_bits_blockB_T_1 | _io_status_bits_blockB_T_2; // @[MSHR.scala:168:{45,59,62}] wire _io_status_bits_blockB_T_4 = ~w_pprobeacklast; // @[MSHR.scala:133:33, :168:82] wire _io_status_bits_blockB_T_5 = _io_status_bits_blockB_T_3 | _io_status_bits_blockB_T_4; // @[MSHR.scala:168:{59,79,82}] wire _io_status_bits_blockB_T_6 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103] wire _io_status_bits_blockB_T_7 = _io_status_bits_blockB_T_5 & _io_status_bits_blockB_T_6; // @[MSHR.scala:168:{79,100,103}] assign _io_status_bits_blockB_T_8 = _io_status_bits_blockB_T | _io_status_bits_blockB_T_7; // 
@[MSHR.scala:168:{28,40,100}] assign io_status_bits_blockB_0 = _io_status_bits_blockB_T_8; // @[MSHR.scala:84:7, :168:40] wire _io_status_bits_nestB_T = meta_valid & w_releaseack; // @[MSHR.scala:99:27, :125:33, :169:39] wire _io_status_bits_nestB_T_1 = _io_status_bits_nestB_T & w_rprobeacklast; // @[MSHR.scala:123:33, :169:{39,55}] wire _io_status_bits_nestB_T_2 = _io_status_bits_nestB_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :169:{55,74}] wire _io_status_bits_nestB_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :169:96] assign _io_status_bits_nestB_T_4 = _io_status_bits_nestB_T_2 & _io_status_bits_nestB_T_3; // @[MSHR.scala:169:{74,93,96}] assign io_status_bits_nestB_0 = _io_status_bits_nestB_T_4; // @[MSHR.scala:84:7, :169:93] assign _io_status_bits_blockC_T = ~meta_valid; // @[MSHR.scala:99:27, :168:28, :172:28] assign io_status_bits_blockC_0 = _io_status_bits_blockC_T; // @[MSHR.scala:84:7, :172:28] wire _io_status_bits_nestC_T = ~w_rprobeackfirst; // @[MSHR.scala:122:33, :173:43] wire _io_status_bits_nestC_T_1 = ~w_pprobeackfirst; // @[MSHR.scala:132:33, :173:64] wire _io_status_bits_nestC_T_2 = _io_status_bits_nestC_T | _io_status_bits_nestC_T_1; // @[MSHR.scala:173:{43,61,64}] wire _io_status_bits_nestC_T_3 = ~w_grantfirst; // @[MSHR.scala:129:33, :168:103, :173:85] wire _io_status_bits_nestC_T_4 = _io_status_bits_nestC_T_2 | _io_status_bits_nestC_T_3; // @[MSHR.scala:173:{61,82,85}] assign _io_status_bits_nestC_T_5 = meta_valid & _io_status_bits_nestC_T_4; // @[MSHR.scala:99:27, :173:{39,82}] assign io_status_bits_nestC_0 = _io_status_bits_nestC_T_5; // @[MSHR.scala:84:7, :173:39] wire _no_wait_T = w_rprobeacklast & w_releaseack; // @[MSHR.scala:123:33, :125:33, :183:33] wire _no_wait_T_1 = _no_wait_T & w_grantlast; // @[MSHR.scala:130:33, :183:{33,49}] wire _no_wait_T_2 = _no_wait_T_1 & w_pprobeacklast; // @[MSHR.scala:133:33, :183:{49,64}] assign no_wait = _no_wait_T_2 & w_grantack; // @[MSHR.scala:138:33, :183:{64,83}] assign io_schedule_bits_reload_0 = no_wait; // @[MSHR.scala:84:7, :183:83] wire _io_schedule_bits_a_valid_T = ~s_acquire; // @[MSHR.scala:127:33, :184:31] wire _io_schedule_bits_a_valid_T_1 = _io_schedule_bits_a_valid_T & s_release; // @[MSHR.scala:124:33, :184:{31,42}] assign _io_schedule_bits_a_valid_T_2 = _io_schedule_bits_a_valid_T_1 & s_pprobe; // @[MSHR.scala:126:33, :184:{42,55}] assign io_schedule_bits_a_valid_0 = _io_schedule_bits_a_valid_T_2; // @[MSHR.scala:84:7, :184:55] wire _io_schedule_bits_b_valid_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31] wire _io_schedule_bits_b_valid_T_1 = ~s_pprobe; // @[MSHR.scala:126:33, :185:44] assign _io_schedule_bits_b_valid_T_2 = _io_schedule_bits_b_valid_T | _io_schedule_bits_b_valid_T_1; // @[MSHR.scala:185:{31,41,44}] assign io_schedule_bits_b_valid_0 = _io_schedule_bits_b_valid_T_2; // @[MSHR.scala:84:7, :185:41] wire _io_schedule_bits_c_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32] wire _io_schedule_bits_c_valid_T_1 = _io_schedule_bits_c_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :186:{32,43}] assign _io_schedule_bits_c_valid_T_4 = _io_schedule_bits_c_valid_T_1; // @[MSHR.scala:186:{43,64}] assign io_schedule_bits_c_valid_0 = _io_schedule_bits_c_valid_T_4; // @[MSHR.scala:84:7, :186:64] wire _io_schedule_bits_d_valid_T = ~s_execute; // @[MSHR.scala:137:33, :187:31] wire _io_schedule_bits_d_valid_T_1 = _io_schedule_bits_d_valid_T & w_pprobeack; // @[MSHR.scala:134:33, :187:{31,42}] assign _io_schedule_bits_d_valid_T_2 = _io_schedule_bits_d_valid_T_1 & w_grant; // 
@[MSHR.scala:131:33, :187:{42,57}] assign io_schedule_bits_d_valid_0 = _io_schedule_bits_d_valid_T_2; // @[MSHR.scala:84:7, :187:57] wire _io_schedule_bits_e_valid_T = ~s_grantack; // @[MSHR.scala:136:33, :188:31] assign _io_schedule_bits_e_valid_T_1 = _io_schedule_bits_e_valid_T & w_grantfirst; // @[MSHR.scala:129:33, :188:{31,43}] assign io_schedule_bits_e_valid_0 = _io_schedule_bits_e_valid_T_1; // @[MSHR.scala:84:7, :188:43] wire _io_schedule_bits_x_valid_T = ~s_flush; // @[MSHR.scala:128:33, :189:31] assign _io_schedule_bits_x_valid_T_1 = _io_schedule_bits_x_valid_T & w_releaseack; // @[MSHR.scala:125:33, :189:{31,40}] assign io_schedule_bits_x_valid_0 = _io_schedule_bits_x_valid_T_1; // @[MSHR.scala:84:7, :189:40] wire _io_schedule_bits_dir_valid_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :190:34] wire _io_schedule_bits_dir_valid_T_1 = _io_schedule_bits_dir_valid_T & w_rprobeackfirst; // @[MSHR.scala:122:33, :190:{34,45}] wire _io_schedule_bits_dir_valid_T_2 = ~s_writeback; // @[MSHR.scala:139:33, :190:70] wire _io_schedule_bits_dir_valid_T_3 = _io_schedule_bits_dir_valid_T_2 & no_wait; // @[MSHR.scala:183:83, :190:{70,83}] assign _io_schedule_bits_dir_valid_T_4 = _io_schedule_bits_dir_valid_T_1 | _io_schedule_bits_dir_valid_T_3; // @[MSHR.scala:190:{45,66,83}] assign io_schedule_bits_dir_valid_0 = _io_schedule_bits_dir_valid_T_4; // @[MSHR.scala:84:7, :190:66] wire _io_schedule_valid_T = io_schedule_bits_a_valid_0 | io_schedule_bits_b_valid_0; // @[MSHR.scala:84:7, :192:49] wire _io_schedule_valid_T_1 = _io_schedule_valid_T | io_schedule_bits_c_valid_0; // @[MSHR.scala:84:7, :192:{49,77}] wire _io_schedule_valid_T_2 = _io_schedule_valid_T_1 | io_schedule_bits_d_valid_0; // @[MSHR.scala:84:7, :192:{77,105}] wire _io_schedule_valid_T_3 = _io_schedule_valid_T_2 | io_schedule_bits_e_valid_0; // @[MSHR.scala:84:7, :192:105, :193:49] wire _io_schedule_valid_T_4 = _io_schedule_valid_T_3 | io_schedule_bits_x_valid_0; // @[MSHR.scala:84:7, :193:{49,77}] assign _io_schedule_valid_T_5 = _io_schedule_valid_T_4 | io_schedule_bits_dir_valid_0; // @[MSHR.scala:84:7, :193:{77,105}] assign io_schedule_valid_0 = _io_schedule_valid_T_5; // @[MSHR.scala:84:7, :193:105] wire _io_schedule_bits_dir_bits_data_WIRE_dirty = final_meta_writeback_dirty; // @[MSHR.scala:215:38, :310:71] wire [1:0] _io_schedule_bits_dir_bits_data_WIRE_state = final_meta_writeback_state; // @[MSHR.scala:215:38, :310:71] wire _io_schedule_bits_dir_bits_data_WIRE_clients = final_meta_writeback_clients; // @[MSHR.scala:215:38, :310:71] wire after_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire prior_c = final_meta_writeback_clients; // @[MSHR.scala:215:38, :315:27] wire [12:0] _io_schedule_bits_dir_bits_data_WIRE_tag = final_meta_writeback_tag; // @[MSHR.scala:215:38, :310:71] wire final_meta_writeback_hit; // @[MSHR.scala:215:38] wire req_clientBit = request_source == 7'h20; // @[Parameters.scala:46:9] wire _req_needT_T = request_opcode[2]; // @[Parameters.scala:269:12] wire _final_meta_writeback_dirty_T_3 = request_opcode[2]; // @[Parameters.scala:269:12] wire _req_needT_T_1 = ~_req_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN = request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _req_needT_T_2; // @[Parameters.scala:270:13] assign _req_needT_T_2 = _GEN; // @[Parameters.scala:270:13] wire _excluded_client_T_6; // @[Parameters.scala:279:117] assign _excluded_client_T_6 = _GEN; // @[Parameters.scala:270:13, :279:117] wire _GEN_0 = request_param == 3'h1; // @[Parameters.scala:270:42] wire 
_req_needT_T_3; // @[Parameters.scala:270:42] assign _req_needT_T_3 = _GEN_0; // @[Parameters.scala:270:42] wire _final_meta_writeback_clients_T; // @[Parameters.scala:282:11] assign _final_meta_writeback_clients_T = _GEN_0; // @[Parameters.scala:270:42, :282:11] wire _io_schedule_bits_d_bits_param_T_7; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_7 = _GEN_0; // @[Parameters.scala:270:42] wire _req_needT_T_4 = _req_needT_T_2 & _req_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _req_needT_T_5 = _req_needT_T_1 | _req_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _GEN_1 = request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _req_needT_T_6; // @[Parameters.scala:271:14] assign _req_needT_T_6 = _GEN_1; // @[Parameters.scala:271:14] wire _req_acquire_T; // @[MSHR.scala:219:36] assign _req_acquire_T = _GEN_1; // @[Parameters.scala:271:14] wire _excluded_client_T_1; // @[Parameters.scala:279:12] assign _excluded_client_T_1 = _GEN_1; // @[Parameters.scala:271:14, :279:12] wire _req_needT_T_7 = &request_opcode; // @[Parameters.scala:271:52] wire _req_needT_T_8 = _req_needT_T_6 | _req_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _req_needT_T_9 = |request_param; // @[Parameters.scala:271:89] wire _req_needT_T_10 = _req_needT_T_8 & _req_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire req_needT = _req_needT_T_5 | _req_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire _req_acquire_T_1 = &request_opcode; // @[Parameters.scala:271:52] wire req_acquire = _req_acquire_T | _req_acquire_T_1; // @[MSHR.scala:219:{36,53,71}] wire meta_no_clients = ~_meta_no_clients_T; // @[MSHR.scala:220:{25,39}] wire _req_promoteT_T = &meta_state; // @[MSHR.scala:100:17, :221:81] wire _req_promoteT_T_1 = meta_no_clients & _req_promoteT_T; // @[MSHR.scala:220:25, :221:{67,81}] wire _req_promoteT_T_2 = meta_hit ? _req_promoteT_T_1 : gotT; // @[MSHR.scala:100:17, :148:17, :221:{40,67}] wire req_promoteT = req_acquire & _req_promoteT_T_2; // @[MSHR.scala:219:53, :221:{34,40}] wire _final_meta_writeback_dirty_T = request_opcode[0]; // @[MSHR.scala:98:20, :224:65] wire _final_meta_writeback_dirty_T_1 = meta_dirty | _final_meta_writeback_dirty_T; // @[MSHR.scala:100:17, :224:{48,65}] wire _final_meta_writeback_state_T = request_param != 3'h3; // @[MSHR.scala:98:20, :225:55] wire _GEN_2 = meta_state == 2'h2; // @[MSHR.scala:100:17, :225:78] wire _final_meta_writeback_state_T_1; // @[MSHR.scala:225:78] assign _final_meta_writeback_state_T_1 = _GEN_2; // @[MSHR.scala:225:78] wire _final_meta_writeback_state_T_12; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_12 = _GEN_2; // @[MSHR.scala:225:78, :240:70] wire _evict_T_2; // @[MSHR.scala:317:26] assign _evict_T_2 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _before_T_1; // @[MSHR.scala:317:26] assign _before_T_1 = _GEN_2; // @[MSHR.scala:225:78, :317:26] wire _final_meta_writeback_state_T_2 = _final_meta_writeback_state_T & _final_meta_writeback_state_T_1; // @[MSHR.scala:225:{55,64,78}] wire [1:0] _final_meta_writeback_state_T_3 = _final_meta_writeback_state_T_2 ? 
2'h3 : meta_state; // @[MSHR.scala:100:17, :225:{40,64}] wire _GEN_3 = request_param == 3'h2; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:43] assign _final_meta_writeback_clients_T_1 = _GEN_3; // @[Parameters.scala:282:43] wire _io_schedule_bits_d_bits_param_T_5; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_5 = _GEN_3; // @[Parameters.scala:282:43] wire _final_meta_writeback_clients_T_2 = _final_meta_writeback_clients_T | _final_meta_writeback_clients_T_1; // @[Parameters.scala:282:{11,34,43}] wire _final_meta_writeback_clients_T_3 = request_param == 3'h5; // @[Parameters.scala:282:75] wire _final_meta_writeback_clients_T_4 = _final_meta_writeback_clients_T_2 | _final_meta_writeback_clients_T_3; // @[Parameters.scala:282:{34,66,75}] wire _final_meta_writeback_clients_T_5 = _final_meta_writeback_clients_T_4 & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_6 = ~_final_meta_writeback_clients_T_5; // @[MSHR.scala:226:{52,56}] wire _final_meta_writeback_clients_T_7 = meta_clients & _final_meta_writeback_clients_T_6; // @[MSHR.scala:100:17, :226:{50,52}] wire _final_meta_writeback_clients_T_8 = ~probes_toN; // @[MSHR.scala:151:23, :232:54] wire _final_meta_writeback_clients_T_9 = meta_clients & _final_meta_writeback_clients_T_8; // @[MSHR.scala:100:17, :232:{52,54}] wire _final_meta_writeback_dirty_T_2 = meta_hit & meta_dirty; // @[MSHR.scala:100:17, :236:45] wire _final_meta_writeback_dirty_T_4 = ~_final_meta_writeback_dirty_T_3; // @[MSHR.scala:236:{63,78}] wire _final_meta_writeback_dirty_T_5 = _final_meta_writeback_dirty_T_2 | _final_meta_writeback_dirty_T_4; // @[MSHR.scala:236:{45,60,63}] wire [1:0] _GEN_4 = {1'h1, ~req_acquire}; // @[MSHR.scala:219:53, :238:40] wire [1:0] _final_meta_writeback_state_T_4; // @[MSHR.scala:238:40] assign _final_meta_writeback_state_T_4 = _GEN_4; // @[MSHR.scala:238:40] wire [1:0] _final_meta_writeback_state_T_6; // @[MSHR.scala:239:65] assign _final_meta_writeback_state_T_6 = _GEN_4; // @[MSHR.scala:238:40, :239:65] wire _final_meta_writeback_state_T_5 = ~meta_hit; // @[MSHR.scala:100:17, :239:41] wire [1:0] _final_meta_writeback_state_T_7 = gotT ? _final_meta_writeback_state_T_6 : 2'h1; // @[MSHR.scala:148:17, :239:{55,65}] wire _final_meta_writeback_state_T_8 = meta_no_clients & req_acquire; // @[MSHR.scala:219:53, :220:25, :244:72] wire [1:0] _final_meta_writeback_state_T_9 = {1'h1, ~_final_meta_writeback_state_T_8}; // @[MSHR.scala:244:{55,72}] wire _GEN_5 = meta_state == 2'h1; // @[MSHR.scala:100:17, :240:70] wire _final_meta_writeback_state_T_10; // @[MSHR.scala:240:70] assign _final_meta_writeback_state_T_10 = _GEN_5; // @[MSHR.scala:240:70] wire _io_schedule_bits_c_bits_param_T; // @[MSHR.scala:291:53] assign _io_schedule_bits_c_bits_param_T = _GEN_5; // @[MSHR.scala:240:70, :291:53] wire _evict_T_1; // @[MSHR.scala:317:26] assign _evict_T_1 = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire _before_T; // @[MSHR.scala:317:26] assign _before_T = _GEN_5; // @[MSHR.scala:240:70, :317:26] wire [1:0] _final_meta_writeback_state_T_13 = {_final_meta_writeback_state_T_12, 1'h1}; // @[MSHR.scala:240:70] wire _final_meta_writeback_state_T_14 = &meta_state; // @[MSHR.scala:100:17, :221:81, :240:70] wire [1:0] _final_meta_writeback_state_T_15 = _final_meta_writeback_state_T_14 ? _final_meta_writeback_state_T_9 : _final_meta_writeback_state_T_13; // @[MSHR.scala:240:70, :244:55] wire [1:0] _final_meta_writeback_state_T_16 = _final_meta_writeback_state_T_5 ? 
_final_meta_writeback_state_T_7 : _final_meta_writeback_state_T_15; // @[MSHR.scala:239:{40,41,55}, :240:70] wire [1:0] _final_meta_writeback_state_T_17 = req_needT ? _final_meta_writeback_state_T_4 : _final_meta_writeback_state_T_16; // @[Parameters.scala:270:70] wire _final_meta_writeback_clients_T_10 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :245:66] wire _final_meta_writeback_clients_T_11 = meta_clients & _final_meta_writeback_clients_T_10; // @[MSHR.scala:100:17, :245:{64,66}] wire _final_meta_writeback_clients_T_12 = meta_hit & _final_meta_writeback_clients_T_11; // @[MSHR.scala:100:17, :245:{40,64}] wire _final_meta_writeback_clients_T_13 = req_acquire & req_clientBit; // @[Parameters.scala:46:9] wire _final_meta_writeback_clients_T_14 = _final_meta_writeback_clients_T_12 | _final_meta_writeback_clients_T_13; // @[MSHR.scala:245:{40,84}, :246:40] assign final_meta_writeback_tag = request_prio_2 | request_control ? meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :215:38, :223:52, :228:53, :247:30] wire _final_meta_writeback_clients_T_15 = ~probes_toN; // @[MSHR.scala:151:23, :232:54, :258:54] wire _final_meta_writeback_clients_T_16 = meta_clients & _final_meta_writeback_clients_T_15; // @[MSHR.scala:100:17, :258:{52,54}] assign final_meta_writeback_hit = bad_grant ? meta_hit : request_prio_2 | ~request_control; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :227:34, :228:53, :234:30, :248:30, :251:20, :252:21] assign final_meta_writeback_dirty = ~bad_grant & (request_prio_2 ? _final_meta_writeback_dirty_T_1 : request_control ? ~meta_hit & meta_dirty : _final_meta_writeback_dirty_T_5); // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :224:{34,48}, :228:53, :229:21, :230:36, :236:{32,60}, :251:20, :252:21] assign final_meta_writeback_state = bad_grant ? {1'h0, meta_hit} : request_prio_2 ? _final_meta_writeback_state_T_3 : request_control ? (meta_hit ? 2'h0 : meta_state) : _final_meta_writeback_state_T_17; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :225:{34,40}, :228:53, :229:21, :231:36, :237:{32,38}, :251:20, :252:21, :257:36, :263:36] assign final_meta_writeback_clients = bad_grant ? meta_hit & _final_meta_writeback_clients_T_16 : request_prio_2 ? _final_meta_writeback_clients_T_7 : request_control ? (meta_hit ? 
_final_meta_writeback_clients_T_9 : meta_clients) : _final_meta_writeback_clients_T_14; // @[MSHR.scala:98:20, :100:17, :149:22, :215:38, :223:52, :226:{34,50}, :228:53, :229:21, :232:{36,52}, :245:{34,84}, :251:20, :252:21, :258:{36,52}, :264:36] wire _honour_BtoT_T = meta_clients & req_clientBit; // @[Parameters.scala:46:9] wire _honour_BtoT_T_1 = _honour_BtoT_T; // @[MSHR.scala:276:{47,64}] wire honour_BtoT = meta_hit & _honour_BtoT_T_1; // @[MSHR.scala:100:17, :276:{30,64}] wire _excluded_client_T = meta_hit & request_prio_0; // @[MSHR.scala:98:20, :100:17, :279:38] wire _excluded_client_T_2 = &request_opcode; // @[Parameters.scala:271:52, :279:50] wire _excluded_client_T_3 = _excluded_client_T_1 | _excluded_client_T_2; // @[Parameters.scala:279:{12,40,50}] wire _excluded_client_T_4 = request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _excluded_client_T_5 = _excluded_client_T_3 | _excluded_client_T_4; // @[Parameters.scala:279:{40,77,87}] wire _excluded_client_T_8 = _excluded_client_T_5; // @[Parameters.scala:279:{77,106}] wire _excluded_client_T_9 = _excluded_client_T & _excluded_client_T_8; // @[Parameters.scala:279:106] wire excluded_client = _excluded_client_T_9 & req_clientBit; // @[Parameters.scala:46:9] wire [1:0] _io_schedule_bits_a_bits_param_T = meta_hit ? 2'h2 : 2'h1; // @[MSHR.scala:100:17, :282:56] wire [1:0] _io_schedule_bits_a_bits_param_T_1 = req_needT ? _io_schedule_bits_a_bits_param_T : 2'h0; // @[Parameters.scala:270:70] assign io_schedule_bits_a_bits_param_0 = {1'h0, _io_schedule_bits_a_bits_param_T_1}; // @[MSHR.scala:84:7, :282:{35,41}] wire _io_schedule_bits_a_bits_block_T = request_size != 3'h6; // @[MSHR.scala:98:20, :283:51] wire _io_schedule_bits_a_bits_block_T_1 = request_opcode == 3'h0; // @[MSHR.scala:98:20, :284:55] wire _io_schedule_bits_a_bits_block_T_2 = &request_opcode; // @[Parameters.scala:271:52] wire _io_schedule_bits_a_bits_block_T_3 = _io_schedule_bits_a_bits_block_T_1 | _io_schedule_bits_a_bits_block_T_2; // @[MSHR.scala:284:{55,71,89}] wire _io_schedule_bits_a_bits_block_T_4 = ~_io_schedule_bits_a_bits_block_T_3; // @[MSHR.scala:284:{38,71}] assign _io_schedule_bits_a_bits_block_T_5 = _io_schedule_bits_a_bits_block_T | _io_schedule_bits_a_bits_block_T_4; // @[MSHR.scala:283:{51,91}, :284:38] assign io_schedule_bits_a_bits_block_0 = _io_schedule_bits_a_bits_block_T_5; // @[MSHR.scala:84:7, :283:91] wire _io_schedule_bits_b_bits_param_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :286:42] wire [1:0] _io_schedule_bits_b_bits_param_T_1 = req_needT ? 2'h2 : 2'h1; // @[Parameters.scala:270:70] wire [2:0] _io_schedule_bits_b_bits_param_T_2 = request_prio_1 ? request_param : {1'h0, _io_schedule_bits_b_bits_param_T_1}; // @[MSHR.scala:98:20, :286:{61,97}] assign _io_schedule_bits_b_bits_param_T_3 = _io_schedule_bits_b_bits_param_T ? 3'h2 : _io_schedule_bits_b_bits_param_T_2; // @[MSHR.scala:286:{41,42,61}] assign io_schedule_bits_b_bits_param_0 = _io_schedule_bits_b_bits_param_T_3; // @[MSHR.scala:84:7, :286:41] wire _io_schedule_bits_b_bits_tag_T = ~s_rprobe; // @[MSHR.scala:121:33, :185:31, :287:42] assign _io_schedule_bits_b_bits_tag_T_1 = _io_schedule_bits_b_bits_tag_T ? 
meta_tag : request_tag; // @[MSHR.scala:98:20, :100:17, :287:{41,42}] assign io_schedule_bits_b_bits_tag_0 = _io_schedule_bits_b_bits_tag_T_1; // @[MSHR.scala:84:7, :287:41] wire _io_schedule_bits_b_bits_clients_T = ~excluded_client; // @[MSHR.scala:279:28, :289:53] assign _io_schedule_bits_b_bits_clients_T_1 = meta_clients & _io_schedule_bits_b_bits_clients_T; // @[MSHR.scala:100:17, :289:{51,53}] assign io_schedule_bits_b_bits_clients_0 = _io_schedule_bits_b_bits_clients_T_1; // @[MSHR.scala:84:7, :289:51] assign _io_schedule_bits_c_bits_opcode_T = {2'h3, meta_dirty}; // @[MSHR.scala:100:17, :290:41] assign io_schedule_bits_c_bits_opcode_0 = _io_schedule_bits_c_bits_opcode_T; // @[MSHR.scala:84:7, :290:41] assign _io_schedule_bits_c_bits_param_T_1 = _io_schedule_bits_c_bits_param_T ? 3'h2 : 3'h1; // @[MSHR.scala:291:{41,53}] assign io_schedule_bits_c_bits_param_0 = _io_schedule_bits_c_bits_param_T_1; // @[MSHR.scala:84:7, :291:41] wire _io_schedule_bits_d_bits_param_T = ~req_acquire; // @[MSHR.scala:219:53, :298:42] wire [1:0] _io_schedule_bits_d_bits_param_T_1 = {1'h0, req_promoteT}; // @[MSHR.scala:221:34, :300:53] wire [1:0] _io_schedule_bits_d_bits_param_T_2 = honour_BtoT ? 2'h2 : 2'h1; // @[MSHR.scala:276:30, :301:53] wire _io_schedule_bits_d_bits_param_T_3 = ~(|request_param); // @[Parameters.scala:271:89] wire [2:0] _io_schedule_bits_d_bits_param_T_4 = _io_schedule_bits_d_bits_param_T_3 ? {1'h0, _io_schedule_bits_d_bits_param_T_1} : request_param; // @[MSHR.scala:98:20, :299:79, :300:53] wire [2:0] _io_schedule_bits_d_bits_param_T_6 = _io_schedule_bits_d_bits_param_T_5 ? {1'h0, _io_schedule_bits_d_bits_param_T_2} : _io_schedule_bits_d_bits_param_T_4; // @[MSHR.scala:299:79, :301:53] wire [2:0] _io_schedule_bits_d_bits_param_T_8 = _io_schedule_bits_d_bits_param_T_7 ? 3'h1 : _io_schedule_bits_d_bits_param_T_6; // @[MSHR.scala:299:79] assign _io_schedule_bits_d_bits_param_T_9 = _io_schedule_bits_d_bits_param_T ? request_param : _io_schedule_bits_d_bits_param_T_8; // @[MSHR.scala:98:20, :298:{41,42}, :299:79] assign io_schedule_bits_d_bits_param_0 = _io_schedule_bits_d_bits_param_T_9; // @[MSHR.scala:84:7, :298:41] wire _io_schedule_bits_dir_bits_data_T = ~s_release; // @[MSHR.scala:124:33, :186:32, :310:42] assign _io_schedule_bits_dir_bits_data_T_1_dirty = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_dirty; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_state = _io_schedule_bits_dir_bits_data_T ? 2'h0 : _io_schedule_bits_dir_bits_data_WIRE_state; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_clients = ~_io_schedule_bits_dir_bits_data_T & _io_schedule_bits_dir_bits_data_WIRE_clients; // @[MSHR.scala:310:{41,42,71}] assign _io_schedule_bits_dir_bits_data_T_1_tag = _io_schedule_bits_dir_bits_data_T ? 
13'h0 : _io_schedule_bits_dir_bits_data_WIRE_tag; // @[MSHR.scala:310:{41,42,71}] assign io_schedule_bits_dir_bits_data_dirty_0 = _io_schedule_bits_dir_bits_data_T_1_dirty; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_state_0 = _io_schedule_bits_dir_bits_data_T_1_state; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_clients_0 = _io_schedule_bits_dir_bits_data_T_1_clients; // @[MSHR.scala:84:7, :310:41] assign io_schedule_bits_dir_bits_data_tag_0 = _io_schedule_bits_dir_bits_data_T_1_tag; // @[MSHR.scala:84:7, :310:41] wire _evict_T = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :338:32] wire [3:0] evict; // @[MSHR.scala:314:26] wire _evict_out_T = ~evict_c; // @[MSHR.scala:315:27, :318:32] wire [1:0] _GEN_6 = {1'h1, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32] wire [1:0] _evict_out_T_1; // @[MSHR.scala:319:32] assign _evict_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire [1:0] _before_out_T_1; // @[MSHR.scala:319:32] assign _before_out_T_1 = _GEN_6; // @[MSHR.scala:319:32] wire _evict_T_3 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _GEN_7 = {2'h2, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:39] wire [2:0] _evict_out_T_2; // @[MSHR.scala:320:39] assign _evict_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _before_out_T_2; // @[MSHR.scala:320:39] assign _before_out_T_2 = _GEN_7; // @[MSHR.scala:320:39] wire [2:0] _GEN_8 = {2'h3, ~meta_dirty}; // @[MSHR.scala:100:17, :319:32, :320:76] wire [2:0] _evict_out_T_3; // @[MSHR.scala:320:76] assign _evict_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _before_out_T_3; // @[MSHR.scala:320:76] assign _before_out_T_3 = _GEN_8; // @[MSHR.scala:320:76] wire [2:0] _evict_out_T_4 = evict_c ? _evict_out_T_2 : _evict_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _evict_T_4 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _evict_T_5 = ~_evict_T; // @[MSHR.scala:323:11, :338:32] assign evict = _evict_T_5 ? 4'h8 : _evict_T_1 ? {3'h0, _evict_out_T} : _evict_T_2 ? {2'h0, _evict_out_T_1} : _evict_T_3 ? {1'h0, _evict_out_T_4} : {_evict_T_4, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] before_0; // @[MSHR.scala:314:26] wire _before_out_T = ~before_c; // @[MSHR.scala:315:27, :318:32] wire _before_T_2 = &meta_state; // @[MSHR.scala:100:17, :221:81, :317:26] wire [2:0] _before_out_T_4 = before_c ? _before_out_T_2 : _before_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _before_T_3 = ~(|meta_state); // @[MSHR.scala:100:17, :104:22, :317:26] wire _before_T_4 = ~meta_hit; // @[MSHR.scala:100:17, :239:41, :323:11] assign before_0 = _before_T_4 ? 4'h8 : _before_T ? {3'h0, _before_out_T} : _before_T_1 ? {2'h0, _before_out_T_1} : _before_T_2 ? 
{1'h0, _before_out_T_4} : {_before_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26, :323:{11,17,23}] wire [3:0] after; // @[MSHR.scala:314:26] wire _GEN_9 = final_meta_writeback_state == 2'h1; // @[MSHR.scala:215:38, :317:26] wire _after_T; // @[MSHR.scala:317:26] assign _after_T = _GEN_9; // @[MSHR.scala:317:26] wire _prior_T; // @[MSHR.scala:317:26] assign _prior_T = _GEN_9; // @[MSHR.scala:317:26] wire _after_out_T = ~after_c; // @[MSHR.scala:315:27, :318:32] wire _GEN_10 = final_meta_writeback_state == 2'h2; // @[MSHR.scala:215:38, :317:26] wire _after_T_1; // @[MSHR.scala:317:26] assign _after_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire _prior_T_1; // @[MSHR.scala:317:26] assign _prior_T_1 = _GEN_10; // @[MSHR.scala:317:26] wire [1:0] _GEN_11 = {1'h1, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32] wire [1:0] _after_out_T_1; // @[MSHR.scala:319:32] assign _after_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire [1:0] _prior_out_T_1; // @[MSHR.scala:319:32] assign _prior_out_T_1 = _GEN_11; // @[MSHR.scala:319:32] wire _after_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _GEN_12 = {2'h2, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:39] wire [2:0] _after_out_T_2; // @[MSHR.scala:320:39] assign _after_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _prior_out_T_2; // @[MSHR.scala:320:39] assign _prior_out_T_2 = _GEN_12; // @[MSHR.scala:320:39] wire [2:0] _GEN_13 = {2'h3, ~final_meta_writeback_dirty}; // @[MSHR.scala:215:38, :319:32, :320:76] wire [2:0] _after_out_T_3; // @[MSHR.scala:320:76] assign _after_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _prior_out_T_3; // @[MSHR.scala:320:76] assign _prior_out_T_3 = _GEN_13; // @[MSHR.scala:320:76] wire [2:0] _after_out_T_4 = after_c ? _after_out_T_2 : _after_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] wire _GEN_14 = final_meta_writeback_state == 2'h0; // @[MSHR.scala:215:38, :317:26] wire _after_T_3; // @[MSHR.scala:317:26] assign _after_T_3 = _GEN_14; // @[MSHR.scala:317:26] wire _prior_T_3; // @[MSHR.scala:317:26] assign _prior_T_3 = _GEN_14; // @[MSHR.scala:317:26] assign after = _after_T ? {3'h0, _after_out_T} : _after_T_1 ? {2'h0, _after_out_T_1} : _after_T_2 ? 
{1'h0, _after_out_T_4} : {_after_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire probe_bit = io_sinkc_bits_source_0 == 7'h20; // @[Parameters.scala:46:9] wire _GEN_15 = probes_done | probe_bit; // @[Parameters.scala:46:9] wire _last_probe_T; // @[MSHR.scala:459:33] assign _last_probe_T = _GEN_15; // @[MSHR.scala:459:33] wire _probes_done_T; // @[MSHR.scala:467:32] assign _probes_done_T = _GEN_15; // @[MSHR.scala:459:33, :467:32] wire _last_probe_T_1 = ~excluded_client; // @[MSHR.scala:279:28, :289:53, :459:66] wire _last_probe_T_2 = meta_clients & _last_probe_T_1; // @[MSHR.scala:100:17, :459:{64,66}] wire last_probe = _last_probe_T == _last_probe_T_2; // @[MSHR.scala:459:{33,46,64}] wire _probe_toN_T = io_sinkc_bits_param_0 == 3'h1; // @[Parameters.scala:282:11] wire _probe_toN_T_1 = io_sinkc_bits_param_0 == 3'h2; // @[Parameters.scala:282:43] wire _probe_toN_T_2 = _probe_toN_T | _probe_toN_T_1; // @[Parameters.scala:282:{11,34,43}] wire _probe_toN_T_3 = io_sinkc_bits_param_0 == 3'h5; // @[Parameters.scala:282:75] wire probe_toN = _probe_toN_T_2 | _probe_toN_T_3; // @[Parameters.scala:282:{34,66,75}] wire _probes_toN_T = probe_toN & probe_bit; // @[Parameters.scala:46:9] wire _probes_toN_T_1 = probes_toN | _probes_toN_T; // @[MSHR.scala:151:23, :468:{30,35}] wire _probes_noT_T = io_sinkc_bits_param_0 != 3'h3; // @[MSHR.scala:84:7, :469:53] wire _probes_noT_T_1 = probes_noT | _probes_noT_T; // @[MSHR.scala:152:23, :469:{30,53}] wire _w_rprobeackfirst_T = w_rprobeackfirst | last_probe; // @[MSHR.scala:122:33, :459:46, :470:42] wire _GEN_16 = last_probe & io_sinkc_bits_last_0; // @[MSHR.scala:84:7, :459:46, :471:55] wire _w_rprobeacklast_T; // @[MSHR.scala:471:55] assign _w_rprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55] wire _w_pprobeacklast_T; // @[MSHR.scala:473:55] assign _w_pprobeacklast_T = _GEN_16; // @[MSHR.scala:471:55, :473:55] wire _w_rprobeacklast_T_1 = w_rprobeacklast | _w_rprobeacklast_T; // @[MSHR.scala:123:33, :471:{40,55}] wire _w_pprobeackfirst_T = w_pprobeackfirst | last_probe; // @[MSHR.scala:132:33, :459:46, :472:42] wire _w_pprobeacklast_T_1 = w_pprobeacklast | _w_pprobeacklast_T; // @[MSHR.scala:133:33, :473:{40,55}] wire _set_pprobeack_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77] wire _set_pprobeack_T_1 = io_sinkc_bits_last_0 | _set_pprobeack_T; // @[MSHR.scala:84:7, :475:{59,77}] wire set_pprobeack = last_probe & _set_pprobeack_T_1; // @[MSHR.scala:459:46, :475:{36,59}] wire _w_pprobeack_T = w_pprobeack | set_pprobeack; // @[MSHR.scala:134:33, :475:36, :476:32] wire _w_grant_T = ~(|request_offset); // @[MSHR.scala:98:20, :475:77, :490:33] wire _w_grant_T_1 = _w_grant_T | io_sinkd_bits_last_0; // @[MSHR.scala:84:7, :490:{33,41}] wire _gotT_T = io_sinkd_bits_param_0 == 3'h0; // @[MSHR.scala:84:7, :493:35] wire _new_meta_T = io_allocate_valid_0 & io_allocate_bits_repeat_0; // @[MSHR.scala:84:7, :505:40] wire new_meta_dirty = _new_meta_T ? final_meta_writeback_dirty : io_directory_bits_dirty_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [1:0] new_meta_state = _new_meta_T ? final_meta_writeback_state : io_directory_bits_state_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_clients = _new_meta_T ? final_meta_writeback_clients : io_directory_bits_clients_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [12:0] new_meta_tag = _new_meta_T ? final_meta_writeback_tag : io_directory_bits_tag_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_meta_hit = _new_meta_T ? 
final_meta_writeback_hit : io_directory_bits_hit_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire [2:0] new_meta_way = _new_meta_T ? final_meta_writeback_way : io_directory_bits_way_0; // @[MSHR.scala:84:7, :215:38, :505:{21,40}] wire new_request_prio_0 = io_allocate_valid_0 ? allocate_as_full_prio_0 : request_prio_0; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_1 = io_allocate_valid_0 ? allocate_as_full_prio_1 : request_prio_1; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_prio_2 = io_allocate_valid_0 ? allocate_as_full_prio_2 : request_prio_2; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire new_request_control = io_allocate_valid_0 ? allocate_as_full_control : request_control; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_opcode = io_allocate_valid_0 ? allocate_as_full_opcode : request_opcode; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_param = io_allocate_valid_0 ? allocate_as_full_param : request_param; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [2:0] new_request_size = io_allocate_valid_0 ? allocate_as_full_size : request_size; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [6:0] new_request_source = io_allocate_valid_0 ? allocate_as_full_source : request_source; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [12:0] new_request_tag = io_allocate_valid_0 ? allocate_as_full_tag : request_tag; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_offset = io_allocate_valid_0 ? allocate_as_full_offset : request_offset; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [5:0] new_request_put = io_allocate_valid_0 ? allocate_as_full_put : request_put; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire [9:0] new_request_set = io_allocate_valid_0 ? 
allocate_as_full_set : request_set; // @[MSHR.scala:84:7, :98:20, :504:34, :506:24] wire _new_needT_T = new_request_opcode[2]; // @[Parameters.scala:269:12] wire _new_needT_T_1 = ~_new_needT_T; // @[Parameters.scala:269:{5,12}] wire _GEN_17 = new_request_opcode == 3'h5; // @[Parameters.scala:270:13] wire _new_needT_T_2; // @[Parameters.scala:270:13] assign _new_needT_T_2 = _GEN_17; // @[Parameters.scala:270:13] wire _new_skipProbe_T_5; // @[Parameters.scala:279:117] assign _new_skipProbe_T_5 = _GEN_17; // @[Parameters.scala:270:13, :279:117] wire _new_needT_T_3 = new_request_param == 3'h1; // @[Parameters.scala:270:42] wire _new_needT_T_4 = _new_needT_T_2 & _new_needT_T_3; // @[Parameters.scala:270:{13,33,42}] wire _new_needT_T_5 = _new_needT_T_1 | _new_needT_T_4; // @[Parameters.scala:269:{5,16}, :270:33] wire _T_615 = new_request_opcode == 3'h6; // @[Parameters.scala:271:14] wire _new_needT_T_6; // @[Parameters.scala:271:14] assign _new_needT_T_6 = _T_615; // @[Parameters.scala:271:14] wire _new_skipProbe_T; // @[Parameters.scala:279:12] assign _new_skipProbe_T = _T_615; // @[Parameters.scala:271:14, :279:12] wire _new_needT_T_7 = &new_request_opcode; // @[Parameters.scala:271:52] wire _new_needT_T_8 = _new_needT_T_6 | _new_needT_T_7; // @[Parameters.scala:271:{14,42,52}] wire _new_needT_T_9 = |new_request_param; // @[Parameters.scala:271:89] wire _new_needT_T_10 = _new_needT_T_8 & _new_needT_T_9; // @[Parameters.scala:271:{42,80,89}] wire new_needT = _new_needT_T_5 | _new_needT_T_10; // @[Parameters.scala:269:16, :270:70, :271:80] wire new_clientBit = new_request_source == 7'h20; // @[Parameters.scala:46:9] wire _new_skipProbe_T_1 = &new_request_opcode; // @[Parameters.scala:271:52, :279:50] wire _new_skipProbe_T_2 = _new_skipProbe_T | _new_skipProbe_T_1; // @[Parameters.scala:279:{12,40,50}] wire _new_skipProbe_T_3 = new_request_opcode == 3'h4; // @[Parameters.scala:279:87] wire _new_skipProbe_T_4 = _new_skipProbe_T_2 | _new_skipProbe_T_3; // @[Parameters.scala:279:{40,77,87}] wire _new_skipProbe_T_7 = _new_skipProbe_T_4; // @[Parameters.scala:279:{77,106}] wire new_skipProbe = _new_skipProbe_T_7 & new_clientBit; // @[Parameters.scala:46:9] wire [3:0] prior; // @[MSHR.scala:314:26] wire _prior_out_T = ~prior_c; // @[MSHR.scala:315:27, :318:32] wire _prior_T_2 = &final_meta_writeback_state; // @[MSHR.scala:215:38, :317:26] wire [2:0] _prior_out_T_4 = prior_c ? _prior_out_T_2 : _prior_out_T_3; // @[MSHR.scala:315:27, :320:{32,39,76}] assign prior = _prior_T ? {3'h0, _prior_out_T} : _prior_T_1 ? {2'h0, _prior_out_T_1} : _prior_T_2 ? {1'h0, _prior_out_T_4} : {_prior_T_3, 3'h0}; // @[MSHR.scala:314:26, :317:26, :318:{26,32}, :319:{26,32}, :320:{26,32}, :321:26] wire _T_574 = io_directory_valid_0 | _new_meta_T; // @[MSHR.scala:84:7, :505:40, :539:28]
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_146( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_250 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
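For reference, here is a minimal usage sketch (not part of the corpus above) of how the AsyncResetSynchronizerShiftReg companion object from SynchronizerReg.scala is typically invoked to bring a single asynchronous bit into the local clock domain; the wrapper module name IrqSynchronizer and the choice of a 3-deep chain are illustrative assumptions, not taken from the quoted sources.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSynchronizer extends Module {
  val io = IO(new Bundle {
    val async_irq = Input(Bool())  // level signal arriving from another clock domain
    val sync_irq  = Output(Bool()) // safe to sample in this module's clock domain
  })
  // Depth 3, reset value 0, with a suggested instance name so backend CDC flows can
  // locate and constrain the synchronizer chain rather than treat it as ordinary buffering.
  io.sync_irq := AsyncResetSynchronizerShiftReg(io.async_irq, 3, 0, Some("irq_sync"))
}

Elaborating a call of this shape is what yields a Verilog module like the AsyncResetSynchronizerShiftReg_w1_d3_i0_* instance above, which wraps a 3-deep AsyncResetSynchronizerPrimitiveShiftReg chain per bit.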
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
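// source_ok: the request's source ID falls inside some client's advertised sourceId range.
// is_aligned: the low 'size' address bits are zero, e.g. size = 3 (an 8-byte access) requires
//             address[2:0] == 0.
// mask: the full byte-lane mask implied by (address, size) within one beat; the per-opcode
//       checks below compare it against the mask actually carried by the request.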
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
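// Worked example of the per-source bookkeeping kept in these registers: with
// a_opcode_bus_size = 4 (so log_a_opcode_bus_size = 2), a Get (opcode 4) accepted from
// source 2 stores a_opcodes_set_interm = (4 << 1) | 1 = 9 at bit offset (2 << 2) = 8 of
// inflight_opcodes. The D-side lookup shifts right by that same offset, masks 4 bits, and
// drops the low in-flight bit to recover opcode 4, which responseMap then requires to be
// answered by AccessAckData. inflight_sizes is packed and unpacked identically using
// log_a_size_bus_size.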
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permissions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // A potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
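For reference, a minimal illustrative sketch (not one of the source files above, and not used by the module below) of how the AddressSet and TransferSizes helpers defined in Parameters.scala behave. The object name AddressSetExample and every constant in it are assumptions invented for this example.

// Illustrative sketch only: exercises AddressSet / TransferSizes from Parameters.scala above.
// AddressSetExample and all constants are hypothetical; they do not describe the design below.
import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes}

object AddressSetExample extends App {
  // base=0x1000, mask=0xfff -> the contiguous range 0x1000-0x1fff
  val ram = AddressSet(0x1000, 0xfff)
  require(ram.contains(BigInt(0x1abc)))               // an address inside the range
  require(!ram.contains(BigInt(0x2000)))              // just past the end
  require(ram.contiguous && ram.alignment == 0x1000)  // aligned to its own size

  // base=0x1000, mask=0xf0f -> 0x1000-0x100f, 0x1100-0x110f, ... (the strided case from the comment above)
  val strided = AddressSet(0x1000, 0xf0f)
  require(!strided.contiguous)

  // Power-of-two transfer sizes from 1 to 64 bytes
  val xfer = TransferSizes(1, 64)
  require(xfer.contains(32) && !xfer.contains(128) && !xfer.contains(3))
}

A quick check along these lines can help validate base/mask pairs before wiring them into manager parameters.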
module TLMonitor_25( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [8:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [11:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [8:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [8:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [11:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [8:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire 
_c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] 
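// Note: the monitored edge has no C channel (no io_in_c_* ports), so the monitor's
// C-channel bookkeeping terms (the _c_* wires above and below) constant-fold to zero.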
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_30 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_32 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_46 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_48 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_52 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_54 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_58 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_60 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_64 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_66 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_75 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // 
@[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [11:0] _c_first_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_first_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_first_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_first_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_set_wo_ready_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_set_wo_ready_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_opcodes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_sizes_set_interm_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_sizes_set_interm_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_opcodes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_opcodes_set_T = 12'h0; // @[Monitor.scala:767:79] wire [11:0] _c_sizes_set_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_sizes_set_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_sizes_set_T = 12'h0; // @[Monitor.scala:768:77] wire [11:0] _c_probe_ack_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_probe_ack_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _c_probe_ack_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _c_probe_ack_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_1_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_2_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_3_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [11:0] _same_cycle_resp_WIRE_4_bits_address = 12'h0; // @[Bundles.scala:265:74] wire [11:0] _same_cycle_resp_WIRE_5_bits_address = 12'h0; // @[Bundles.scala:265:61] wire [8:0] _c_first_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_first_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] 
_c_first_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_first_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_set_wo_ready_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_set_wo_ready_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_opcodes_set_interm_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_opcodes_set_interm_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_sizes_set_interm_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_sizes_set_interm_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_opcodes_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_opcodes_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_sizes_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_sizes_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_probe_ack_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_probe_ack_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_probe_ack_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_probe_ack_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_4_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_5_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [4098:0] _c_opcodes_set_T_1 = 4099'h0; // 
@[Monitor.scala:767:54] wire [4098:0] _c_sizes_set_T_1 = 4099'h0; // @[Monitor.scala:768:52] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [511:0] _c_set_wo_ready_T = 512'h1; // @[OneHot.scala:58:35] wire [511:0] _c_set_T = 512'h1; // @[OneHot.scala:58:35] wire [1027:0] c_opcodes_set = 1028'h0; // @[Monitor.scala:740:34] wire [1027:0] c_sizes_set = 1028'h0; // @[Monitor.scala:741:34] wire [256:0] c_set = 257'h0; // @[Monitor.scala:738:34] wire [256:0] c_set_wo_ready = 257'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [8:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // 
@[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_48 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] 
_uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 9'h90; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [6:0] _source_ok_T_1 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_7 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_13 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_19 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 7'h20; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 7'h21; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 7'h22; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 7'h23; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 9'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 9'h41; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire _source_ok_T_27 = io_in_a_bits_source_0 == 9'h42; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31] wire [5:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[5:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] _source_ok_T_28 = io_in_a_bits_source_0[8:6]; // @[Monitor.scala:36:7] wire _source_ok_T_29 = _source_ok_T_28 == 3'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_31 = _source_ok_T_29; 
// @[Parameters.scala:54:{32,67}] wire _source_ok_T_33 = _source_ok_T_31; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_8 = _source_ok_T_33; // @[Parameters.scala:1138:31] wire _source_ok_T_34 = io_in_a_bits_source_0 == 9'h100; // @[Monitor.scala:36:7] wire _source_ok_WIRE_9 = _source_ok_T_34; // @[Parameters.scala:1138:31] wire _source_ok_T_35 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_36 = _source_ok_T_35 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_37 = _source_ok_T_36 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_38 = _source_ok_T_37 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_39 = _source_ok_T_38 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_40 = _source_ok_T_39 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_41 = _source_ok_T_40 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_42 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [11:0] _is_aligned_T = {6'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 12'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & 
mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = 
{mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_4 = _uncommonBits_T_4[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_9 = _uncommonBits_T_9[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_14 = _uncommonBits_T_14[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_19 = _uncommonBits_T_19[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_24 = _uncommonBits_T_24[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_29 = _uncommonBits_T_29[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_34 = _uncommonBits_T_34[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_39 = 
_uncommonBits_T_39[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_40 = _uncommonBits_T_40[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_41 = _uncommonBits_T_41[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_44 = _uncommonBits_T_44[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_46 = _uncommonBits_T_46[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_47 = _uncommonBits_T_47[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_49 = _uncommonBits_T_49[5:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_52 = _uncommonBits_T_52[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_53 = _uncommonBits_T_53[1:0]; // @[Parameters.scala:52:{29,56}] wire [5:0] uncommonBits_54 = _uncommonBits_T_54[5:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_43 = io_in_d_bits_source_0 == 9'h90; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_43; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [6:0] _source_ok_T_44 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_50 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_56 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_62 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire _source_ok_T_45 = _source_ok_T_44 == 7'h20; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_47 = _source_ok_T_45; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_49 = _source_ok_T_47; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_49; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_51 = _source_ok_T_50 == 7'h21; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_53 = _source_ok_T_51; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_55 = _source_ok_T_53; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_55; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_57 = _source_ok_T_56 == 7'h22; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_59 = _source_ok_T_57; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_61 = _source_ok_T_59; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_63 = _source_ok_T_62 == 7'h23; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_65 = _source_ok_T_63; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_67 = _source_ok_T_65; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_67; // @[Parameters.scala:1138:31] wire _source_ok_T_68 = 
io_in_d_bits_source_0 == 9'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_68; // @[Parameters.scala:1138:31] wire _source_ok_T_69 = io_in_d_bits_source_0 == 9'h41; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_69; // @[Parameters.scala:1138:31] wire _source_ok_T_70 = io_in_d_bits_source_0 == 9'h42; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_70; // @[Parameters.scala:1138:31] wire [5:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[5:0]; // @[Parameters.scala:52:{29,56}] wire [2:0] _source_ok_T_71 = io_in_d_bits_source_0[8:6]; // @[Monitor.scala:36:7] wire _source_ok_T_72 = _source_ok_T_71 == 3'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_76 = _source_ok_T_74; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_8 = _source_ok_T_76; // @[Parameters.scala:1138:31] wire _source_ok_T_77 = io_in_d_bits_source_0 == 9'h100; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_9 = _source_ok_T_77; // @[Parameters.scala:1138:31] wire _source_ok_T_78 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_79 = _source_ok_T_78 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_80 = _source_ok_T_79 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_81 = _source_ok_T_80 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_82 = _source_ok_T_81 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_83 = _source_ok_T_82 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_84 = _source_ok_T_83 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_85 = _source_ok_T_84 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_85 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46] wire _T_1180 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1180; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1180; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [8:0] source; // @[Monitor.scala:390:22] reg [11:0] address; // @[Monitor.scala:391:22] wire _T_1248 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1248; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1248; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1248; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [8:0] source_1; // @[Monitor.scala:541:22] reg [256:0] inflight; // @[Monitor.scala:614:27] reg [1027:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [1027:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [256:0] a_set; // @[Monitor.scala:626:34] wire [256:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [1027:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [1027:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [11:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [11:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [11:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [11:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [11:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [11:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [11:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [11:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [11:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [1027:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [1027:0] _a_opcode_lookup_T_6 = {1024'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [1027:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[1027:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [1027:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [1027:0] _a_size_lookup_T_6 = {1024'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [1027:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[1027:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, 
:651:26, :684:44] wire [511:0] _GEN_2 = 512'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [511:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [511:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[256:0] : 257'h0; // @[OneHot.scala:58:35] wire _T_1113 = _T_1180 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1113 ? _a_set_T[256:0] : 257'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1113 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1113 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [11:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [11:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [11:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [4098:0] _a_opcodes_set_T_1 = {4095'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1113 ? _a_opcodes_set_T_1[1027:0] : 1028'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [4098:0] _a_sizes_set_T_1 = {4095'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1113 ? _a_sizes_set_T_1[1027:0] : 1028'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [256:0] d_clr; // @[Monitor.scala:664:34] wire [256:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [1027:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [1027:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1159 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [511:0] _GEN_5 = 512'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [511:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [511:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [511:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [511:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1159 & ~d_release_ack ? _d_clr_wo_ready_T[256:0] : 257'h0; // @[OneHot.scala:58:35] wire _T_1128 = _T_1248 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1128 ? 
_d_clr_T[256:0] : 257'h0; // @[OneHot.scala:58:35] wire [4110:0] _d_opcodes_clr_T_5 = 4111'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1128 ? _d_opcodes_clr_T_5[1027:0] : 1028'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [4110:0] _d_sizes_clr_T_5 = 4111'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1128 ? _d_sizes_clr_T_5[1027:0] : 1028'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [256:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [256:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [256:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [1027:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [1027:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [1027:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [1027:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [1027:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [1027:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [256:0] inflight_1; // @[Monitor.scala:726:35] wire [256:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [1027:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [1027:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [1027:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [1027:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [1027:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [1027:0] _c_opcode_lookup_T_6 = {1024'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [1027:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[1027:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [1027:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [1027:0] _c_size_lookup_T_6 = {1024'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [1027:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[1027:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [256:0] d_clr_1; // @[Monitor.scala:774:34] wire [256:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [1027:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [1027:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1224 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1224 & d_release_ack_1 ? _d_clr_wo_ready_T_1[256:0] : 257'h0; // @[OneHot.scala:58:35] wire _T_1206 = _T_1248 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1206 ? _d_clr_T_1[256:0] : 257'h0; // @[OneHot.scala:58:35] wire [4110:0] _d_opcodes_clr_T_11 = 4111'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1206 ? _d_opcodes_clr_T_11[1027:0] : 1028'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [4110:0] _d_sizes_clr_T_11 = 4111'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1206 ? 
_d_sizes_clr_T_11[1027:0] : 1028'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 9'h0; // @[Monitor.scala:36:7, :795:113] wire [256:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [256:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [1027:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [1027:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [1027:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [1027:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
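The generated tracking logic above implements the TileLink monitor's in-flight bookkeeping: each accepted A-channel beat sets a one-hot bit (1 << source) in an inflight bitmap and records its opcode and size in packed vectors, and the matching D-channel beat clears them again. The Monitor.scala source itself is not quoted in this entry, so the following is only a minimal Chisel sketch of that pattern; InflightTracker and its port names are illustrative assumptions, not the real monitor interface.

import chisel3._
import chisel3.util._

// Minimal sketch of the inflight-tracking pattern (names are illustrative,
// not the real Monitor interface): one bit per source ID is set when an
// A-channel beat fires and cleared when the matching D-channel beat fires.
class InflightTracker(nSources: Int) extends Module {
  require(nSources > 1)
  val io = IO(new Bundle {
    val a_fire   = Input(Bool())
    val a_source = Input(UInt(log2Ceil(nSources).W))
    val d_fire   = Input(Bool())
    val d_source = Input(UInt(log2Ceil(nSources).W))
    val busy     = Output(UInt(nSources.W))
  })
  val inflight = RegInit(0.U(nSources.W))
  val a_set = Mux(io.a_fire, UIntToOH(io.a_source, nSources), 0.U) // 1 << source
  val d_clr = Mux(io.d_fire, UIntToOH(io.d_source, nSources), 0.U)
  inflight := (inflight | a_set) & ~d_clr
  io.busy  := inflight
}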
Generate the Verilog code corresponding to the following Chisel files. File IngressUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ class IngressUnit( ingressNodeId: Int, cParam: IngressChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean, ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { class IngressUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(Decoupled(new IngressFlit(cParam.payloadBits))) } val io = IO(new IngressUnitIO) val route_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2)) val route_q = Module(new Queue(new RouteComputerResp(outParams, egressParams), 2, flow=combineRCVA)) assert(!(io.in.valid && !cParam.possibleFlows.toSeq.map(_.egressId.U === io.in.bits.egress_id).orR)) route_buffer.io.enq.bits.head := io.in.bits.head route_buffer.io.enq.bits.tail := io.in.bits.tail val flows = cParam.possibleFlows.toSeq if (flows.size == 0) { route_buffer.io.enq.bits.flow := DontCare } else { route_buffer.io.enq.bits.flow.ingress_node := cParam.destId.U route_buffer.io.enq.bits.flow.ingress_node_id := ingressNodeId.U route_buffer.io.enq.bits.flow.vnet_id := cParam.vNetId.U route_buffer.io.enq.bits.flow.egress_node := Mux1H( flows.map(_.egressId.U === io.in.bits.egress_id), flows.map(_.egressNode.U) ) route_buffer.io.enq.bits.flow.egress_node_id := Mux1H( flows.map(_.egressId.U === io.in.bits.egress_id), flows.map(_.egressNodeId.U) ) } route_buffer.io.enq.bits.payload := io.in.bits.payload route_buffer.io.enq.bits.virt_channel_id := DontCare io.router_req.bits.src_virt_id := 0.U io.router_req.bits.flow := route_buffer.io.enq.bits.flow val at_dest = route_buffer.io.enq.bits.flow.egress_node === nodeId.U route_buffer.io.enq.valid := io.in.valid && ( io.router_req.ready || !io.in.bits.head || at_dest) io.router_req.valid := io.in.valid && route_buffer.io.enq.ready && io.in.bits.head && !at_dest io.in.ready := route_buffer.io.enq.ready && ( io.router_req.ready || !io.in.bits.head || at_dest) route_q.io.enq.valid := io.router_req.fire route_q.io.enq.bits := io.router_resp when (io.in.fire && io.in.bits.head && at_dest) { route_q.io.enq.valid := true.B route_q.io.enq.bits.vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (egressParams(o).egressId.U === io.in.bits.egress_id) { route_q.io.enq.bits.vc_sel(o+nOutputs)(0) := true.B } } } assert(!(route_q.io.enq.valid && !route_q.io.enq.ready)) val vcalloc_buffer = Module(new Queue(new Flit(cParam.payloadBits), 2)) val vcalloc_q = Module(new Queue(new VCAllocResp(outParams, egressParams), 1, pipe=true)) vcalloc_buffer.io.enq.bits := route_buffer.io.deq.bits io.vcalloc_req.bits.vc_sel := route_q.io.deq.bits.vc_sel io.vcalloc_req.bits.flow := route_buffer.io.deq.bits.flow io.vcalloc_req.bits.in_vc := 0.U val head = route_buffer.io.deq.bits.head val tail = route_buffer.io.deq.bits.tail vcalloc_buffer.io.enq.valid := (route_buffer.io.deq.valid && (route_q.io.deq.valid || !head) && (io.vcalloc_req.ready || !head) ) io.vcalloc_req.valid := (route_buffer.io.deq.valid && route_q.io.deq.valid && head && vcalloc_buffer.io.enq.ready && vcalloc_q.io.enq.ready) route_buffer.io.deq.ready := (vcalloc_buffer.io.enq.ready && (route_q.io.deq.valid || !head) && (io.vcalloc_req.ready || !head) && (vcalloc_q.io.enq.ready || !head)) 
route_q.io.deq.ready := (route_buffer.io.deq.fire && tail) vcalloc_q.io.enq.valid := io.vcalloc_req.fire vcalloc_q.io.enq.bits := io.vcalloc_resp assert(!(vcalloc_q.io.enq.valid && !vcalloc_q.io.enq.ready)) io.salloc_req(0).bits.vc_sel := vcalloc_q.io.deq.bits.vc_sel io.salloc_req(0).bits.tail := vcalloc_buffer.io.deq.bits.tail val c = (vcalloc_q.io.deq.bits.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U val vcalloc_tail = vcalloc_buffer.io.deq.bits.tail io.salloc_req(0).valid := vcalloc_buffer.io.deq.valid && vcalloc_q.io.deq.valid && c && !io.block vcalloc_buffer.io.deq.ready := io.salloc_req(0).ready && vcalloc_q.io.deq.valid && c && !io.block vcalloc_q.io.deq.ready := vcalloc_tail && vcalloc_buffer.io.deq.fire val out_bundle = if (combineSAST) { Wire(Valid(new SwitchBundle(outParams, egressParams))) } else { Reg(Valid(new SwitchBundle(outParams, egressParams))) } io.out(0) := out_bundle out_bundle.valid := vcalloc_buffer.io.deq.fire out_bundle.bits.flit := vcalloc_buffer.io.deq.bits out_bundle.bits.flit.virt_channel_id := 0.U val out_channel_oh = vcalloc_q.io.deq.bits.vc_sel.map(_.reduce(_||_)).toSeq out_bundle.bits.out_virt_channel := Mux1H(out_channel_oh, vcalloc_q.io.deq.bits.vc_sel.map(v => OHToUInt(v)).toSeq) io.debug.va_stall := io.vcalloc_req.valid && !io.vcalloc_req.ready io.debug.sa_stall := io.salloc_req(0).valid && !io.salloc_req(0).ready // TODO: We should not generate input/ingress/output/egress units for untraversable channels if (!cParam.traversable) { io.in.ready := false.B io.router_req.valid := false.B io.router_req.bits := DontCare io.vcalloc_req.valid := false.B io.vcalloc_req.bits := DontCare io.salloc_req.foreach(_.valid := false.B) io.salloc_req.foreach(_.bits := DontCare) io.out.foreach(_.valid := false.B) io.out.foreach(_.bits := DontCare) } }
module IngressUnit_20( // @[IngressUnit.scala:11:7] input clock, // @[IngressUnit.scala:11:7] input reset, // @[IngressUnit.scala:11:7] output [3:0] io_router_req_bits_flow_egress_node, // @[IngressUnit.scala:24:14] output [2:0] io_router_req_bits_flow_egress_node_id, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_8, // @[IngressUnit.scala:24:14] input io_router_resp_vc_sel_0_9, // @[IngressUnit.scala:24:14] input io_vcalloc_req_ready, // @[IngressUnit.scala:24:14] output io_vcalloc_req_valid, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_8, // @[IngressUnit.scala:24:14] output io_vcalloc_req_bits_vc_sel_0_9, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_3_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_2_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_1_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_0, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_1, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_2, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_3, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_4, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_5, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_6, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_7, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_8, // @[IngressUnit.scala:24:14] input io_vcalloc_resp_vc_sel_0_9, // @[IngressUnit.scala:24:14] input io_out_credit_available_3_0, // @[IngressUnit.scala:24:14] input io_out_credit_available_2_0, // @[IngressUnit.scala:24:14] input io_out_credit_available_1_0, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_0, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_1, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_3, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_4, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_5, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_7, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_8, // @[IngressUnit.scala:24:14] input io_out_credit_available_0_9, // @[IngressUnit.scala:24:14] input io_salloc_req_0_ready, // @[IngressUnit.scala:24:14] 
output io_salloc_req_0_valid, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_3_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_2_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_1_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_0, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_1, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_3, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_4, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_5, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_6, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_7, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_8, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_vc_sel_0_9, // @[IngressUnit.scala:24:14] output io_salloc_req_0_bits_tail, // @[IngressUnit.scala:24:14] output io_out_0_valid, // @[IngressUnit.scala:24:14] output io_out_0_bits_flit_head, // @[IngressUnit.scala:24:14] output io_out_0_bits_flit_tail, // @[IngressUnit.scala:24:14] output [72:0] io_out_0_bits_flit_payload, // @[IngressUnit.scala:24:14] output [2:0] io_out_0_bits_flit_flow_vnet_id, // @[IngressUnit.scala:24:14] output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[IngressUnit.scala:24:14] output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[IngressUnit.scala:24:14] output [3:0] io_out_0_bits_flit_flow_egress_node, // @[IngressUnit.scala:24:14] output [2:0] io_out_0_bits_flit_flow_egress_node_id, // @[IngressUnit.scala:24:14] output [3:0] io_out_0_bits_out_virt_channel, // @[IngressUnit.scala:24:14] output io_in_ready, // @[IngressUnit.scala:24:14] input io_in_valid, // @[IngressUnit.scala:24:14] input io_in_bits_head, // @[IngressUnit.scala:24:14] input io_in_bits_tail, // @[IngressUnit.scala:24:14] input [72:0] io_in_bits_payload, // @[IngressUnit.scala:24:14] input [3:0] io_in_bits_egress_id // @[IngressUnit.scala:24:14] ); wire _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_valid; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_3_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_2_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_1_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_0; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_1; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_2; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_3; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_4; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_5; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_6; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_7; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_8; // @[IngressUnit.scala:76:25] wire _vcalloc_q_io_deq_bits_vc_sel_0_9; // @[IngressUnit.scala:76:25] wire _vcalloc_buffer_io_enq_ready; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_valid; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_bits_head; // @[IngressUnit.scala:75:30] wire _vcalloc_buffer_io_deq_bits_tail; // @[IngressUnit.scala:75:30] wire [72:0] _vcalloc_buffer_io_deq_bits_payload; // @[IngressUnit.scala:75:30] wire [2:0] _vcalloc_buffer_io_deq_bits_flow_vnet_id; // 
@[IngressUnit.scala:75:30] wire [3:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:75:30] wire [2:0] _vcalloc_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:75:30] wire [3:0] _vcalloc_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:75:30] wire [2:0] _vcalloc_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:75:30] wire _route_q_io_enq_ready; // @[IngressUnit.scala:27:23] wire _route_q_io_deq_valid; // @[IngressUnit.scala:27:23] wire _route_buffer_io_enq_ready; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_valid; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_bits_head; // @[IngressUnit.scala:26:28] wire _route_buffer_io_deq_bits_tail; // @[IngressUnit.scala:26:28] wire [72:0] _route_buffer_io_deq_bits_payload; // @[IngressUnit.scala:26:28] wire [2:0] _route_buffer_io_deq_bits_flow_vnet_id; // @[IngressUnit.scala:26:28] wire [3:0] _route_buffer_io_deq_bits_flow_ingress_node; // @[IngressUnit.scala:26:28] wire [2:0] _route_buffer_io_deq_bits_flow_ingress_node_id; // @[IngressUnit.scala:26:28] wire [3:0] _route_buffer_io_deq_bits_flow_egress_node; // @[IngressUnit.scala:26:28] wire [2:0] _route_buffer_io_deq_bits_flow_egress_node_id; // @[IngressUnit.scala:26:28] wire [3:0] _route_buffer_io_deq_bits_virt_channel_id; // @[IngressUnit.scala:26:28] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_5 = io_in_bits_egress_id == 4'h3; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_6 = io_in_bits_egress_id == 4'h1; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_7 = io_in_bits_egress_id == 4'h9; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_8 = io_in_bits_egress_id == 4'h7; // @[IngressUnit.scala:30:72] wire _route_buffer_io_enq_bits_flow_egress_node_id_T_9 = io_in_bits_egress_id == 4'h5; // @[IngressUnit.scala:30:72] wire [1:0] _GEN = {2{_route_buffer_io_enq_bits_flow_egress_node_id_T_7}} | {_route_buffer_io_enq_bits_flow_egress_node_id_T_8, 1'h0}; // @[Mux.scala:30:73] wire _GEN_0 = _GEN[0] | _route_buffer_io_enq_bits_flow_egress_node_id_T_9; // @[Mux.scala:30:73] wire [3:0] route_buffer_io_enq_bits_flow_egress_node = {1'h0, _route_buffer_io_enq_bits_flow_egress_node_id_T_6, _GEN[1], _GEN_0}; // @[Mux.scala:30:73] wire [2:0] route_buffer_io_enq_bits_flow_egress_node_id = {2'h0, _route_buffer_io_enq_bits_flow_egress_node_id_T_5 | _route_buffer_io_enq_bits_flow_egress_node_id_T_6 | _route_buffer_io_enq_bits_flow_egress_node_id_T_7 | _route_buffer_io_enq_bits_flow_egress_node_id_T_8 | _route_buffer_io_enq_bits_flow_egress_node_id_T_9}; // @[Mux.scala:30:73] wire [2:0] _GEN_1 = {_route_buffer_io_enq_bits_flow_egress_node_id_T_6, _GEN[1], _GEN_0}; // @[Mux.scala:30:73] wire _GEN_2 = _route_buffer_io_enq_ready & io_in_valid & io_in_bits_head & _GEN_1 == 3'h6; // @[Decoupled.scala:51:35] wire route_q_io_enq_valid = _GEN_2 | io_in_valid & _route_buffer_io_enq_ready & io_in_bits_head & _GEN_1 != 3'h6; // @[Decoupled.scala:51:35] wire io_vcalloc_req_valid_0 = _route_buffer_io_deq_valid & _route_q_io_deq_valid & _route_buffer_io_deq_bits_head & _vcalloc_buffer_io_enq_ready & _vcalloc_q_io_enq_ready; // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :91:{54,78}, :92:{10,41}] wire route_buffer_io_deq_ready = _vcalloc_buffer_io_enq_ready & (_route_q_io_deq_valid | ~_route_buffer_io_deq_bits_head) & (io_vcalloc_req_ready | ~_route_buffer_io_deq_bits_head) & (_vcalloc_q_io_enq_ready | 
~_route_buffer_io_deq_bits_head); // @[IngressUnit.scala:26:28, :27:23, :75:30, :76:25, :88:30, :93:61, :94:{27,37}, :95:{27,37}, :96:29] wire vcalloc_q_io_enq_valid = io_vcalloc_req_ready & io_vcalloc_req_valid_0; // @[Decoupled.scala:51:35]
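The egress lookup in the IngressUnit.scala prompt above relies on Mux1H: the select vector is the set of comparisons of egress_id against every legal flow, which the router parameters guarantee to be mutually exclusive, and the generated Verilog flattens this into the OR terms seen in _GEN and _GEN_0. A self-contained sketch of the same idiom follows; the egress IDs and node numbers are made up for illustration, whereas the real values come from cParam.possibleFlows.

import chisel3._
import chisel3.util._

// Self-contained sketch of the Mux1H egress lookup (the IDs and node numbers
// below are hypothetical; the real values come from cParam.possibleFlows).
class EgressLookup extends Module {
  val io = IO(new Bundle {
    val egress_id   = Input(UInt(4.W))
    val egress_node = Output(UInt(4.W))
  })
  val egressIds   = Seq(1, 3, 5, 7, 9)  // hypothetical legal egress IDs
  val egressNodes = Seq(2, 0, 1, 4, 6)  // hypothetical destination nodes
  // At most one comparison can be true, so the select vector is one-hot
  // (or all-zero) and Mux1H reduces to a simple AND/OR network.
  io.egress_node := Mux1H(
    egressIds.map(_.U === io.egress_id),
    egressNodes.map(_.U(4.W))
  )
}

Mux1H is used here instead of a priority mux because the selects cannot overlap, which is also why the emitted Verilog can collapse them into plain OR trees.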
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_174( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_195 io_out_source_extend ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
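The AsyncQueue pointers in the quoted source are exchanged between clock domains as Gray codes: GrayCounter returns incremented ^ (incremented >> 1), so binary 0, 1, 2, 3, 4 becomes Gray 000, 001, 011, 010, 110 and consecutive values differ in exactly one bit. A synchronizer that samples mid-update therefore sees either the old or the new pointer, never a torn mixture. Below is a simplified sketch of that conversion; GrayPointer and its port names are assumptions, and it omits the clear input and output registering of the real GrayCounter.

import chisel3._
import chisel3.util._

// Simplified sketch of the binary-to-Gray conversion used by GrayCounter
// (module and port names are assumptions): consecutive Gray values differ
// in exactly one bit, which makes the pointer safe to synchronize.
class GrayPointer(bits: Int) extends Module {
  val io = IO(new Bundle {
    val inc  = Input(Bool())
    val gray = Output(UInt(bits.W))
  })
  val binary = RegInit(0.U(bits.W))
  when (io.inc) { binary := binary + 1.U }
  io.gray := binary ^ (binary >> 1) // e.g. b011 -> b010, b100 -> b110
}

In the quoted source the full and empty tests (widx =/= ridx ^ ..., ridx =/= widx) are then performed directly on these Gray-coded pointer values after they cross through AsyncResetSynchronizerShiftReg.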
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
module OptimizationBarrier_TLBEntryData_4( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae_ptw, // @[package.scala:268:18] input io_x_ae_final, // @[package.scala:268:18] input io_x_ae_stage2, // @[package.scala:268:18] input io_x_pf, // @[package.scala:268:18] input io_x_gf, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_hw, // @[package.scala:268:18] input io_x_hx, // @[package.scala:268:18] input io_x_hr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_ppp, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_ae_ptw, // @[package.scala:268:18] output io_y_ae_final, // @[package.scala:268:18] output io_y_ae_stage2, // @[package.scala:268:18] output io_y_pf, // @[package.scala:268:18] output io_y_gf, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_hw, // @[package.scala:268:18] output io_y_hx, // @[package.scala:268:18] output io_y_hr, // @[package.scala:268:18] output io_y_pw, // @[package.scala:268:18] output io_y_px, // @[package.scala:268:18] output io_y_pr, // @[package.scala:268:18] output io_y_ppp, // @[package.scala:268:18] output io_y_pal, // @[package.scala:268:18] output io_y_paa, // @[package.scala:268:18] output io_y_eff, // @[package.scala:268:18] output io_y_c // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30] wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30] wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30] wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30] wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30] wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30] wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // @[package.scala:267:30] wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // 
@[package.scala:267:30] wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30] wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30] wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30] wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; // @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30] wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30] wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30] wire io_y_pw_0 = io_x_pw_0; // @[package.scala:267:30] wire io_y_px_0 = io_x_px_0; // @[package.scala:267:30] wire io_y_pr_0 = io_x_pr_0; // @[package.scala:267:30] wire io_y_ppp_0 = io_x_ppp_0; // @[package.scala:267:30] wire io_y_pal_0 = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa_0 = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff_0 = io_x_eff_0; // @[package.scala:267:30] wire io_y_c_0 = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30] assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30] assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30] assign io_y_pf = io_y_pf_0; // @[package.scala:267:30] assign io_y_gf = io_y_gf_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // @[package.scala:267:30] assign io_y_hw = io_y_hw_0; // @[package.scala:267:30] assign io_y_hx = io_y_hx_0; // @[package.scala:267:30] assign io_y_hr = io_y_hr_0; // @[package.scala:267:30] assign io_y_pw = io_y_pw_0; // @[package.scala:267:30] assign io_y_px = io_y_px_0; // @[package.scala:267:30] assign io_y_pr = io_y_pr_0; // @[package.scala:267:30] assign io_y_ppp = io_y_ppp_0; // @[package.scala:267:30] assign io_y_pal = io_y_pal_0; // @[package.scala:267:30] assign io_y_paa = io_y_paa_0; // @[package.scala:267:30] assign io_y_eff = io_y_eff_0; // @[package.scala:267:30] assign io_y_c = io_y_c_0; // @[package.scala:267:30] endmodule
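Several of the helpers quoted in package.scala above build shift masks: UIntToOH1(x, w) produces a thermometer mask of x low ones (3 gives 0b00000111 for w = 8), and OH1ToOH converts that mask into a conventional one-hot value (0b00000111 gives 0b000001000). A small usage sketch follows, assuming the quoted freechips.rocketchip.util package object is on the classpath; MaskExample and its port names are illustrative only.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

// Usage sketch for two of the mask helpers quoted above (MaskExample and its
// ports are hypothetical; the helpers come from the quoted package object).
class MaskExample extends Module {
  val io = IO(new Bundle {
    val shift  = Input(UInt(3.W))
    val oh1    = Output(UInt(8.W))
    val onehot = Output(UInt(9.W))
  })
  io.oh1    := UIntToOH1(io.shift, 8) // shift = 3 gives 0b00000111
  io.onehot := OH1ToOH(io.oh1)        // 0b00000111 gives 0b000001000
}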
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag }
module OptimizationBarrier_TLBEntryData_27( // @[package.scala:267:30] input clock, // @[package.scala:267:30] input reset, // @[package.scala:267:30] input [19:0] io_x_ppn, // @[package.scala:268:18] input io_x_u, // @[package.scala:268:18] input io_x_g, // @[package.scala:268:18] input io_x_ae_ptw, // @[package.scala:268:18] input io_x_ae_final, // @[package.scala:268:18] input io_x_ae_stage2, // @[package.scala:268:18] input io_x_pf, // @[package.scala:268:18] input io_x_gf, // @[package.scala:268:18] input io_x_sw, // @[package.scala:268:18] input io_x_sx, // @[package.scala:268:18] input io_x_sr, // @[package.scala:268:18] input io_x_hw, // @[package.scala:268:18] input io_x_hx, // @[package.scala:268:18] input io_x_hr, // @[package.scala:268:18] input io_x_pw, // @[package.scala:268:18] input io_x_px, // @[package.scala:268:18] input io_x_pr, // @[package.scala:268:18] input io_x_ppp, // @[package.scala:268:18] input io_x_pal, // @[package.scala:268:18] input io_x_paa, // @[package.scala:268:18] input io_x_eff, // @[package.scala:268:18] input io_x_c, // @[package.scala:268:18] input io_x_fragmented_superpage, // @[package.scala:268:18] output io_y_u, // @[package.scala:268:18] output io_y_ae_ptw, // @[package.scala:268:18] output io_y_ae_final, // @[package.scala:268:18] output io_y_ae_stage2, // @[package.scala:268:18] output io_y_pf, // @[package.scala:268:18] output io_y_gf, // @[package.scala:268:18] output io_y_sw, // @[package.scala:268:18] output io_y_sx, // @[package.scala:268:18] output io_y_sr, // @[package.scala:268:18] output io_y_hw, // @[package.scala:268:18] output io_y_hx, // @[package.scala:268:18] output io_y_hr // @[package.scala:268:18] ); wire [19:0] io_x_ppn_0 = io_x_ppn; // @[package.scala:267:30] wire io_x_u_0 = io_x_u; // @[package.scala:267:30] wire io_x_g_0 = io_x_g; // @[package.scala:267:30] wire io_x_ae_ptw_0 = io_x_ae_ptw; // @[package.scala:267:30] wire io_x_ae_final_0 = io_x_ae_final; // @[package.scala:267:30] wire io_x_ae_stage2_0 = io_x_ae_stage2; // @[package.scala:267:30] wire io_x_pf_0 = io_x_pf; // @[package.scala:267:30] wire io_x_gf_0 = io_x_gf; // @[package.scala:267:30] wire io_x_sw_0 = io_x_sw; // @[package.scala:267:30] wire io_x_sx_0 = io_x_sx; // @[package.scala:267:30] wire io_x_sr_0 = io_x_sr; // @[package.scala:267:30] wire io_x_hw_0 = io_x_hw; // @[package.scala:267:30] wire io_x_hx_0 = io_x_hx; // @[package.scala:267:30] wire io_x_hr_0 = io_x_hr; // @[package.scala:267:30] wire io_x_pw_0 = io_x_pw; // @[package.scala:267:30] wire io_x_px_0 = io_x_px; // @[package.scala:267:30] wire io_x_pr_0 = io_x_pr; // @[package.scala:267:30] wire io_x_ppp_0 = io_x_ppp; // @[package.scala:267:30] wire io_x_pal_0 = io_x_pal; // @[package.scala:267:30] wire io_x_paa_0 = io_x_paa; // @[package.scala:267:30] wire io_x_eff_0 = io_x_eff; // @[package.scala:267:30] wire io_x_c_0 = io_x_c; // @[package.scala:267:30] wire io_x_fragmented_superpage_0 = io_x_fragmented_superpage; // @[package.scala:267:30] wire [19:0] io_y_ppn = io_x_ppn_0; // @[package.scala:267:30] wire io_y_u_0 = io_x_u_0; // @[package.scala:267:30] wire io_y_g = io_x_g_0; // @[package.scala:267:30] wire io_y_ae_ptw_0 = io_x_ae_ptw_0; // @[package.scala:267:30] wire io_y_ae_final_0 = io_x_ae_final_0; // @[package.scala:267:30] wire io_y_ae_stage2_0 = io_x_ae_stage2_0; // @[package.scala:267:30] wire io_y_pf_0 = io_x_pf_0; // @[package.scala:267:30] wire io_y_gf_0 = io_x_gf_0; // @[package.scala:267:30] wire io_y_sw_0 = io_x_sw_0; // @[package.scala:267:30] wire io_y_sx_0 = io_x_sx_0; 
// @[package.scala:267:30] wire io_y_sr_0 = io_x_sr_0; // @[package.scala:267:30] wire io_y_hw_0 = io_x_hw_0; // @[package.scala:267:30] wire io_y_hx_0 = io_x_hx_0; // @[package.scala:267:30] wire io_y_hr_0 = io_x_hr_0; // @[package.scala:267:30] wire io_y_pw = io_x_pw_0; // @[package.scala:267:30] wire io_y_px = io_x_px_0; // @[package.scala:267:30] wire io_y_pr = io_x_pr_0; // @[package.scala:267:30] wire io_y_ppp = io_x_ppp_0; // @[package.scala:267:30] wire io_y_pal = io_x_pal_0; // @[package.scala:267:30] wire io_y_paa = io_x_paa_0; // @[package.scala:267:30] wire io_y_eff = io_x_eff_0; // @[package.scala:267:30] wire io_y_c = io_x_c_0; // @[package.scala:267:30] wire io_y_fragmented_superpage = io_x_fragmented_superpage_0; // @[package.scala:267:30] assign io_y_u = io_y_u_0; // @[package.scala:267:30] assign io_y_ae_ptw = io_y_ae_ptw_0; // @[package.scala:267:30] assign io_y_ae_final = io_y_ae_final_0; // @[package.scala:267:30] assign io_y_ae_stage2 = io_y_ae_stage2_0; // @[package.scala:267:30] assign io_y_pf = io_y_pf_0; // @[package.scala:267:30] assign io_y_gf = io_y_gf_0; // @[package.scala:267:30] assign io_y_sw = io_y_sw_0; // @[package.scala:267:30] assign io_y_sx = io_y_sx_0; // @[package.scala:267:30] assign io_y_sr = io_y_sr_0; // @[package.scala:267:30] assign io_y_hw = io_y_hw_0; // @[package.scala:267:30] assign io_y_hx = io_y_hx_0; // @[package.scala:267:30] assign io_y_hr = io_y_hr_0; // @[package.scala:267:30] endmodule
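The two modules above are pure feed-throughs: every io_y_* output is a copy of the matching io_x_* input, and fields that the enclosing design never reads are dropped from the port list and survive only as internal wires. A minimal sketch, using a hypothetical ExampleEntry bundle in place of TLBEntryData, of how the OptimizationBarrier helper from package.scala above elaborates into such a module:

import chisel3._
import freechips.rocketchip.util.OptimizationBarrier

class ExampleEntry extends Bundle {
  val ppn = UInt(20.W)
  val u   = Bool()
  val sr  = Bool()
}

class BarrierUser extends Module {
  val in  = IO(Input(new ExampleEntry))
  val out = IO(Output(new ExampleEntry))
  // OptimizationBarrier wraps `in` in a module named
  // OptimizationBarrier_ExampleEntry whose io.y simply mirrors io.x,
  // preventing synthesis from optimizing across the boundary.
  out := OptimizationBarrier(in)
}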
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_143( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_243 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
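The module name above encodes the generator parameters (w1 = 1-bit payload, d3 = three synchronizer stages, i0 = reset value 0), and its body just wraps one AsyncResetSynchronizerPrimitiveShiftReg per bit. A minimal sketch, with hypothetical signal names, of the companion-object call from the SynchronizerReg.scala file above that produces this shape:

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSync extends Module {
  val irq_async = IO(Input(Bool()))
  val irq_sync  = IO(Output(Bool()))
  // Elaborates an AsyncResetSynchronizerShiftReg_w1_d3_i0 instance:
  // width taken from the input (1 bit), 3 flops deep, reset value 0.
  irq_sync := AsyncResetSynchronizerShiftReg(irq_async, sync = 3, init = 0)
}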
Generate the Verilog code corresponding to the following Chisel files. File FunctionalUnit.scala: package saturn.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util._ import freechips.rocketchip.tile._ import saturn.common._ import saturn.insns.{VectorInstruction} abstract class FunctionalUnitIO(implicit p: Parameters) extends CoreBundle()(p) with HasVectorParams { val iss = new Bundle { val valid = Input(Bool()) val op = Input(new ExecuteMicroOp) val ready = Output(Bool()) } val scalar_write = Decoupled(new ScalarWrite) val set_vxsat = Output(Bool()) val set_fflags = Output(Valid(UInt(5.W))) } class PipelinedFunctionalUnitIO(depth: Int)(implicit p: Parameters) extends FunctionalUnitIO { val write = Valid(new VectorWrite(dLen)) val pipe = Input(Vec(depth, Valid(new ExecuteMicroOp))) val pipe0_stall = Output(Bool()) } class IterativeFunctionalUnitIO(implicit p: Parameters) extends FunctionalUnitIO { val write = Decoupled(new VectorWrite(dLen)) val hazard = Output(Valid(new PipeHazard(10))) val acc = Output(Bool()) val tail = Output(Bool()) val busy = Output(Bool()) } trait FunctionalUnitFactory { def insns: Seq[VectorInstruction] def generate(implicit p: Parameters): FunctionalUnit } abstract class FunctionalUnit(implicit p: Parameters) extends CoreModule()(p) with HasVectorParams { val io: FunctionalUnitIO } abstract class PipelinedFunctionalUnit(val depth: Int)(implicit p: Parameters) extends FunctionalUnit()(p) { val io = IO(new PipelinedFunctionalUnitIO(depth)) require (depth > 0) def narrow2_expand(bits: Seq[UInt], eew: UInt, upper: Bool, sext: Bool): Vec[UInt] = { val narrow_eew = (0 until 3).map { eew => Wire(Vec(dLenB >> (eew + 1), UInt((16 << eew).W))) } for (eew <- 0 until 3) { val in_vec = bits.grouped(1 << eew).map(g => VecInit(g).asUInt).toSeq for (i <- 0 until dLenB >> (eew + 1)) { val lo = Mux(upper, in_vec(i + (dLenB >> (eew + 1))), in_vec(i)) val hi = Fill(16 << eew, lo((8 << eew)-1) && sext) narrow_eew(eew)(i) := Cat(hi, lo) } } VecInit(narrow_eew.map(_.asUInt))(eew).asTypeOf(Vec(dLenB, UInt(8.W))) } } abstract class IterativeFunctionalUnit(implicit p: Parameters) extends FunctionalUnit()(p) { val io = IO(new IterativeFunctionalUnitIO) val valid = RegInit(false.B) val op = Reg(new ExecuteMicroOp) val last = Wire(Bool()) io.busy := valid io.hazard.bits.latency := DontCare when (io.iss.valid && io.iss.ready) { valid := true.B op := io.iss.op } .elsewhen (last) { valid := false.B } } File IntegerDivide.scala: package saturn.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import freechips.rocketchip.rocket._ import freechips.rocketchip.rocket.ALU._ import freechips.rocketchip.util._ import freechips.rocketchip.tile._ import saturn.common._ import saturn.insns._ case class IntegerDivideFactory(supportsMul: Boolean) extends FunctionalUnitFactory { def mul_insns = Seq( MUL.VV, MUL.VX, MULH.VV, MULH.VX, MULHU.VV, MULHU.VX, MULHSU.VV, MULHSU.VX, WMUL.VV, WMUL.VX, WMULU.VV, WMULU.VX, WMULSU.VV, WMULSU.VX, MACC.VV, MACC.VX, NMSAC.VV, NMSAC.VX, MADD.VV, MADD.VX, NMSUB.VV, NMSUB.VX, WMACC.VV, WMACC.VX, WMACCU.VV, WMACCU.VX, WMACCSU.VV , WMACCSU.VX, WMACCUS.VV, WMACCUS.VX, SMUL.VV, SMUL.VX ).map(_.elementWise) def div_insns = Seq( DIVU.VV, DIVU.VX, DIV.VV, DIV.VX, REMU.VV, REMU.VX, REM.VV, REM.VX ).map(_.elementWise) def insns = (div_insns ++ (if (supportsMul) mul_insns else Nil)) def generate(implicit p: Parameters) = new 
IterativeIntegerDivider(supportsMul)(p) } class IterativeIntegerDivider(supportsMul: Boolean)(implicit p: Parameters) extends IterativeFunctionalUnit()(p) { val div_insns = IntegerDivideFactory(supportsMul).div_insns val mul_insns = IntegerDivideFactory(supportsMul).mul_insns val div = Module(new MulDiv(MulDivParams(mulUnroll = if (supportsMul) 8 else 0), 64, 1)) // 128 to make smul work io.iss.ready := new VectorDecoder(io.iss.op.funct3, io.iss.op.funct6, 0.U, 0.U, div_insns, Nil).matched && div.io.req.ready && (!valid || last) io.set_vxsat := false.B io.set_fflags.valid := false.B io.set_fflags.bits := DontCare div.io.req.valid := io.iss.valid && io.iss.ready val ctrl_fn = WireInit(VecInit(Seq(FN_DIVU, FN_DIV, FN_REMU, FN_REM))(io.iss.op.funct6(1,0))) val ctrl_sign1 = WireInit(io.iss.op.funct6(0)) val ctrl_sign2 = WireInit(io.iss.op.funct6(0)) val ctrl_swapvdv2 = WireInit(false.B) if (supportsMul) { val mul_ctrl = new VectorDecoder(io.iss.op.funct3, io.iss.op.funct6, 0.U, 0.U, mul_insns, Seq( MULHi, MULSign1, MULSign2, MULSwapVdV2)) when (mul_ctrl.matched) { ctrl_fn := Mux(mul_ctrl.bool(MULHi), Mux(mul_ctrl.bool(MULSign2) && mul_ctrl.bool(MULSign1), FN_MULH, Mux(mul_ctrl.bool(MULSign2), FN_MULHSU, FN_MULHU)), FN_MUL) when (io.iss.op.isOpi) { ctrl_fn := FN_MULH } ctrl_sign1 := mul_ctrl.bool(MULSign1) ctrl_sign2 := mul_ctrl.bool(MULSign2) ctrl_swapvdv2 := mul_ctrl.bool(MULSwapVdV2) } } div.io.req.bits.fn := ctrl_fn div.io.req.bits.in1 := Mux(ctrl_swapvdv2, io.iss.op.rvd_elem, Mux(ctrl_sign2, sextElem(io.iss.op.rvs2_elem, io.iss.op.rvs2_eew), io.iss.op.rvs2_elem)) div.io.req.bits.in2 := Mux(ctrl_sign1, sextElem(io.iss.op.rvs1_elem, io.iss.op.rvs1_eew), io.iss.op.rvs1_elem) div.io.req.bits.dw := DW_64 div.io.req.bits.tag := DontCare div.io.kill := false.B io.hazard.valid := valid io.hazard.bits.eg := op.wvd_eg val write_elem = WireInit(div.io.resp.bits.data) val wdata = VecInit.tabulate(4)({ eew => Fill(dLenB >> eew, write_elem((8<<eew)-1,0)) })(op.rvd_eew) if (supportsMul) { val mul_ctrl = new VectorDecoder(op.funct3, op.funct6, 0.U, 0.U, mul_insns, Seq( MULHi, MULSign1, MULSign2, MULSwapVdV2, MULAccumulate, MULSub)) val is_smul = op.isOpi val prod = div.io.resp.bits.full_data.asSInt val hi = VecInit.tabulate(4)({ eew => prod >> (8 << eew) })(op.vd_eew)(63,0) val lo = VecInit.tabulate(4)({ eew => prod((8 << eew)-1,0)})(op.vd_eew)(63,0) val madd = Mux(mul_ctrl.bool(MULSub), ~lo, lo) + mul_ctrl.bool(MULSub) + Mux(mul_ctrl.bool(MULSwapVdV2), op.rvs2_elem, op.rvd_elem) val rounding_incr = VecInit.tabulate(4)({ eew => RoundingIncrement(op.vxrm, prod((8 << eew)-1,0)) })(op.vd_eew) val smul = VecInit.tabulate(4)({ eew => prod >> ((8 << eew) - 1) })(op.vd_eew) + Cat(0.U(1.W), rounding_incr).asSInt val smul_clip_neg = VecInit.tabulate(4)({ eew => (-1 << ((8 << eew)-1)).S })(op.vd_eew) val smul_clip_pos = VecInit.tabulate(4)({ eew => ((1 << ((8 << eew)-1)) - 1).S })(op.vd_eew) val smul_clip_hi = smul > smul_clip_pos val smul_clip_lo = smul < smul_clip_neg val smul_clipped = Mux(smul_clip_hi, smul_clip_pos, 0.S) | Mux(smul_clip_lo, smul_clip_neg, 0.S) | Mux(!smul_clip_hi && !smul_clip_lo, smul, 0.S) val smul_sat = smul_clip_hi || smul_clip_lo val mul_wdata = WireInit(Mux(mul_ctrl.bool(MULHi), hi, lo)) when (mul_ctrl.bool(MULAccumulate) || is_smul) { mul_wdata := Mux(mul_ctrl.bool(MULAccumulate), madd, 0.U) | Mux(is_smul, smul_clipped.asUInt, 0.U) } when (mul_ctrl.matched) { write_elem := mul_wdata io.set_vxsat := is_smul && smul_sat && io.write.fire && op.wmask =/= 0.U } } div.io.resp.ready := 
io.write.ready io.write.valid := div.io.resp.valid io.write.bits.eg := op.wvd_eg io.write.bits.mask := FillInterleaved(8, op.wmask) io.write.bits.data := wdata io.scalar_write.valid := false.B io.scalar_write.bits := DontCare last := io.write.fire io.acc := false.B io.tail := false.B }
module IterativeIntegerDivider( // @[IntegerDivide.scala:38:7] input clock, // @[IntegerDivide.scala:38:7] input reset, // @[IntegerDivide.scala:38:7] input io_iss_valid, // @[FunctionalUnit.scala:68:14] input [63:0] io_iss_op_rvs1_elem, // @[FunctionalUnit.scala:68:14] input [63:0] io_iss_op_rvs2_elem, // @[FunctionalUnit.scala:68:14] input [1:0] io_iss_op_rvs1_eew, // @[FunctionalUnit.scala:68:14] input [1:0] io_iss_op_rvs2_eew, // @[FunctionalUnit.scala:68:14] input [1:0] io_iss_op_rvd_eew, // @[FunctionalUnit.scala:68:14] input [15:0] io_iss_op_wmask, // @[FunctionalUnit.scala:68:14] input [4:0] io_iss_op_wvd_eg, // @[FunctionalUnit.scala:68:14] input [2:0] io_iss_op_funct3, // @[FunctionalUnit.scala:68:14] input [5:0] io_iss_op_funct6, // @[FunctionalUnit.scala:68:14] output io_iss_ready, // @[FunctionalUnit.scala:68:14] input io_write_ready, // @[FunctionalUnit.scala:68:14] output io_write_valid, // @[FunctionalUnit.scala:68:14] output [4:0] io_write_bits_eg, // @[FunctionalUnit.scala:68:14] output [127:0] io_write_bits_data, // @[FunctionalUnit.scala:68:14] output [127:0] io_write_bits_mask, // @[FunctionalUnit.scala:68:14] output io_hazard_valid, // @[FunctionalUnit.scala:68:14] output [4:0] io_hazard_bits_eg, // @[FunctionalUnit.scala:68:14] output io_busy // @[FunctionalUnit.scala:68:14] ); wire last; // @[Decoupled.scala:51:35] wire io_iss_ready_0; // @[IntegerDivide.scala:43:{107,127}] wire _div_io_req_ready; // @[IntegerDivide.scala:42:19] wire _div_io_resp_valid; // @[IntegerDivide.scala:42:19] wire [63:0] _div_io_resp_bits_data; // @[IntegerDivide.scala:42:19] wire [3:0][2:0] _GEN = '{3'h6, 3'h7, 3'h4, 3'h5}; reg valid; // @[FunctionalUnit.scala:70:22] reg [1:0] op_rvd_eew; // @[FunctionalUnit.scala:71:15] reg [15:0] op_wmask; // @[FunctionalUnit.scala:71:15] reg [4:0] op_wvd_eg; // @[FunctionalUnit.scala:71:15] wire div_io_req_valid = io_iss_valid & io_iss_ready_0; // @[IntegerDivide.scala:43:{107,127}] wire [2:0] _GEN_0 = ~(io_iss_op_funct6[4:2]); // @[pla.scala:78:21] assign io_iss_ready_0 = (&{_GEN_0[0], _GEN_0[1], _GEN_0[2], io_iss_op_funct6[5], ~(io_iss_op_funct3[0]), io_iss_op_funct3[1]}) & _div_io_req_ready & (~valid | last); // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}] wire [3:0][64:0] _GEN_1 = {{{io_iss_op_rvs2_elem[63], io_iss_op_rvs2_elem}}, {{{33{io_iss_op_rvs2_elem[31]}}, io_iss_op_rvs2_elem[31:0]}}, {{{49{io_iss_op_rvs2_elem[15]}}, io_iss_op_rvs2_elem[15:0]}}, {{{57{io_iss_op_rvs2_elem[7]}}, io_iss_op_rvs2_elem[7:0]}}}; // @[Parameters.scala:363:67, :364:{8,11,29}, :365:13] wire [3:0][64:0] _GEN_2 = {{{io_iss_op_rvs1_elem[63], io_iss_op_rvs1_elem}}, {{{33{io_iss_op_rvs1_elem[31]}}, io_iss_op_rvs1_elem[31:0]}}, {{{49{io_iss_op_rvs1_elem[15]}}, io_iss_op_rvs1_elem[15:0]}}, {{{57{io_iss_op_rvs1_elem[7]}}, io_iss_op_rvs1_elem[7:0]}}}; // @[Parameters.scala:363:67, :364:{8,11,29}, :365:13] wire [3:0][127:0] _GEN_3 = {{{2{_div_io_resp_bits_data}}}, {{2{{2{_div_io_resp_bits_data[31:0]}}}}}, {{2{{2{{2{_div_io_resp_bits_data[15:0]}}}}}}}, {{2{{2{{2{{2{_div_io_resp_bits_data[7:0]}}}}}}}}}}; // @[IntegerDivide.scala:42:19, :88:{48,73}, :122:22] assign last = io_write_ready & _div_io_resp_valid; // @[Decoupled.scala:51:35] always @(posedge clock) begin // @[IntegerDivide.scala:38:7] if (reset) // @[IntegerDivide.scala:38:7] valid <= 1'h0; // @[FunctionalUnit.scala:70:22] else // @[IntegerDivide.scala:38:7] valid <= div_io_req_valid | ~last & valid; // @[Decoupled.scala:51:35] if (div_io_req_valid) begin // @[FunctionalUnit.scala:78:22] op_rvd_eew <= 
io_iss_op_rvd_eew; // @[FunctionalUnit.scala:71:15] op_wmask <= io_iss_op_wmask; // @[FunctionalUnit.scala:71:15] op_wvd_eg <= io_iss_op_wvd_eg; // @[FunctionalUnit.scala:71:15] end // always @(posedge)
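The _GEN_1 and _GEN_2 selection tables in the divider above appear to be the elaborated form of the sextElem(...) calls in IntegerDivide.scala: the 64-bit source element is sign-extended to 65 bits according to its encoded element width before it reaches MulDiv. The helper itself is not shown in the sources above; a minimal sketch, assuming the obvious definition, of the same construction:

import chisel3._
import chisel3.util._

object SextElemSketch {
  // elem: a 64-bit element; eew: encoded element width (0=8b, 1=16b, 2=32b, 3=64b).
  // Returns the element sign-extended to 65 bits, matching the per-eew
  // mux tables (_GEN_1/_GEN_2) in the generated Verilog above.
  def apply(elem: UInt, eew: UInt): UInt =
    VecInit.tabulate(4) { e =>
      val w = 8 << e
      Cat(Fill(65 - w, elem(w - 1)), elem(w - 1, 0))
    }(eew)
}

In the divider this extended value feeds MulDiv's in1/in2 operands, selected by the ctrl_sign1/ctrl_sign2 decode bits, so signed divides and remainders see a correctly extended operand.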
Generate the Verilog code corresponding to the following Chisel files. File RouteComputer.scala: package constellation.router import chisel3._ import chisel3.util._ import chisel3.util.experimental.decode.{TruthTable, decoder} import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import freechips.rocketchip.rocket.DecodeLogic import constellation.channel._ import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo} import constellation.noc.{HasNoCParams} class RouteComputerReq(implicit val p: Parameters) extends Bundle with HasNoCParams { val src_virt_id = UInt(virtualChannelBits.W) val flow = new FlowRoutingBundle } class RouteComputerResp( val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }) } class RouteComputer( val routerParams: RouterParams, val inParams: Seq[ChannelParams], val outParams: Seq[ChannelParams], val ingressParams: Seq[IngressChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterParams with HasRouterInputParams with HasRouterOutputParams with HasNoCParams { val io = IO(new Bundle { val req = MixedVec(allInParams.map { u => Flipped(Decoupled(new RouteComputerReq)) }) val resp = MixedVec(allInParams.map { u => Output(new RouteComputerResp(outParams, egressParams)) }) }) (io.req zip io.resp).zipWithIndex.map { case ((req, resp), i) => req.ready := true.B if (outParams.size == 0) { assert(!req.valid) resp.vc_sel := DontCare } else { def toUInt(t: (Int, FlowRoutingInfo)): UInt = { val l2 = (BigInt(t._1) << req.bits.flow.vnet_id .getWidth) | t._2.vNetId val l3 = ( l2 << req.bits.flow.ingress_node .getWidth) | t._2.ingressNode val l4 = ( l3 << req.bits.flow.ingress_node_id.getWidth) | t._2.ingressNodeId val l5 = ( l4 << req.bits.flow.egress_node .getWidth) | t._2.egressNode val l6 = ( l5 << req.bits.flow.egress_node_id .getWidth) | t._2.egressNodeId l6.U(req.bits.getWidth.W) } val flow = req.bits.flow val table = allInParams(i).possibleFlows.toSeq.distinct.map { pI => allInParams(i).channelRoutingInfos.map { cI => var row: String = "b" (0 until nOutputs).foreach { o => (0 until outParams(o).nVirtualChannels).foreach { outVId => row = row + (if (routingRelation(cI, outParams(o).channelRoutingInfos(outVId), pI)) "1" else "0") } } ((cI.vc, pI), row) } }.flatten val addr = req.bits.asUInt val width = outParams.map(_.nVirtualChannels).reduce(_+_) val decoded = if (table.size > 0) { val truthTable = TruthTable( table.map { e => (BitPat(toUInt(e._1)), BitPat(e._2)) }, BitPat("b" + "?" * width) ) Reverse(decoder(addr, truthTable)) } else { 0.U(width.W) } var idx = 0 (0 until nAllOutputs).foreach { o => if (o < nOutputs) { (0 until outParams(o).nVirtualChannels).foreach { outVId => resp.vc_sel(o)(outVId) := decoded(idx) idx += 1 } } else { resp.vc_sel(o)(0) := false.B } } } } }
module RouteComputer_6( // @[RouteComputer.scala:29:7] input [3:0] io_req_2_bits_flow_egress_node, // @[RouteComputer.scala:40:14] input [2:0] io_req_2_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14] input [3:0] io_req_1_bits_flow_egress_node, // @[RouteComputer.scala:40:14] input [3:0] io_req_0_bits_src_virt_id, // @[RouteComputer.scala:40:14] input [2:0] io_req_0_bits_flow_vnet_id, // @[RouteComputer.scala:40:14] input [3:0] io_req_0_bits_flow_ingress_node, // @[RouteComputer.scala:40:14] input [2:0] io_req_0_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14] input [3:0] io_req_0_bits_flow_egress_node, // @[RouteComputer.scala:40:14] input [2:0] io_req_0_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_0, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_1, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_2, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_3, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_4, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_5, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_6, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_7, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_8, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_9, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_0, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_1, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_2, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_3, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_4, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_5, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_6, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_7, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_8, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_0_9, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_0_0, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_0_1, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_0_3, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_0_4, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_0_5, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_0_7, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_0_8, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_0_9 // @[RouteComputer.scala:40:14] ); wire [20:0] decoded_invInputs = ~{io_req_0_bits_src_virt_id, io_req_0_bits_flow_vnet_id, io_req_0_bits_flow_ingress_node, io_req_0_bits_flow_ingress_node_id, io_req_0_bits_flow_egress_node, io_req_0_bits_flow_egress_node_id}; // @[pla.scala:78:21] wire [1:0] _GEN = ~(io_req_1_bits_flow_egress_node[3:2]); // @[pla.scala:78:21] assign io_resp_2_vc_sel_0_0 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_2_vc_sel_0_1 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_2_vc_sel_0_2 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_2_vc_sel_0_3 = &{io_req_2_bits_flow_egress_node_id[0], ~(io_req_2_bits_flow_egress_node[3])}; // @[pla.scala:78:21, :90:45, :98:{53,70}] assign io_resp_2_vc_sel_0_4 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_2_vc_sel_0_5 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_2_vc_sel_0_6 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_2_vc_sel_0_7 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_2_vc_sel_0_8 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_2_vc_sel_0_9 = 1'h0; // @[RouteComputer.scala:29:7] assign 
io_resp_1_vc_sel_0_0 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_1_vc_sel_0_1 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_1_vc_sel_0_2 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_1_vc_sel_0_3 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_1_vc_sel_0_4 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_1_vc_sel_0_5 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_1_vc_sel_0_6 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_1_vc_sel_0_7 = &{_GEN[0], _GEN[1]}; // @[pla.scala:78:21, :91:29, :98:{53,70}] assign io_resp_1_vc_sel_0_8 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_1_vc_sel_0_9 = 1'h0; // @[RouteComputer.scala:29:7] assign io_resp_0_vc_sel_0_0 = |{&{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], decoded_invInputs[17], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], decoded_invInputs[3], decoded_invInputs[4], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], decoded_invInputs[20]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[0], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[1], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], decoded_invInputs[17], decoded_invInputs[20]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[0], io_req_0_bits_flow_egress_node[1], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], decoded_invInputs[20]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], decoded_invInputs[3], io_req_0_bits_flow_egress_node[3], decoded_invInputs[7], io_req_0_bits_flow_ingress_node_id[1], decoded_invInputs[9], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], decoded_invInputs[18], decoded_invInputs[19]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_0_1 = |{&{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], decoded_invInputs[3], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[20]}, &{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[1], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[20]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_0_3 = 
|{&{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[5], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], io_req_0_bits_flow_ingress_node_id[2], decoded_invInputs[10], decoded_invInputs[11], io_req_0_bits_flow_ingress_node[2], decoded_invInputs[13], io_req_0_bits_flow_vnet_id[0], decoded_invInputs[15], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[18], decoded_invInputs[19]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[5], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], io_req_0_bits_flow_ingress_node_id[2], decoded_invInputs[10], decoded_invInputs[11], io_req_0_bits_flow_ingress_node[2], decoded_invInputs[13], io_req_0_bits_flow_vnet_id[0], decoded_invInputs[15], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[20]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], decoded_invInputs[9], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[11], io_req_0_bits_flow_ingress_node[2], decoded_invInputs[13], io_req_0_bits_flow_vnet_id[0], decoded_invInputs[15], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[18], decoded_invInputs[19]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], decoded_invInputs[9], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[11], io_req_0_bits_flow_ingress_node[2], decoded_invInputs[13], io_req_0_bits_flow_vnet_id[0], decoded_invInputs[15], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[20]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_0_4 = |{&{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], decoded_invInputs[17], decoded_invInputs[18], decoded_invInputs[19]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[3], decoded_invInputs[4], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], decoded_invInputs[20]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[0], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], decoded_invInputs[18], decoded_invInputs[19]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[1], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], decoded_invInputs[17], decoded_invInputs[20]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[0], io_req_0_bits_flow_egress_node[1], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], decoded_invInputs[20]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[3], io_req_0_bits_flow_egress_node[3], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], decoded_invInputs[9], 
decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], decoded_invInputs[18], decoded_invInputs[19]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_0_5 = |{&{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[18], decoded_invInputs[19]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[3], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[20]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[1], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[20]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_0_7 = |{&{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[5], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], decoded_invInputs[9], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[11], io_req_0_bits_flow_ingress_node[2], decoded_invInputs[13], io_req_0_bits_flow_vnet_id[0], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[5], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], decoded_invInputs[9], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[11], io_req_0_bits_flow_ingress_node[2], decoded_invInputs[13], io_req_0_bits_flow_vnet_id[0], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[0], decoded_invInputs[20]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_0_8 = |{&{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[17], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[3], decoded_invInputs[4], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[20]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[17], decoded_invInputs[20]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[0], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[0], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[20]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], 
io_req_0_bits_flow_egress_node[1], io_req_0_bits_flow_egress_node[2], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], decoded_invInputs[9], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[17], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[0], io_req_0_bits_flow_egress_node[1], io_req_0_bits_flow_egress_node[2], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], decoded_invInputs[9], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], io_req_0_bits_flow_egress_node[3], decoded_invInputs[7], decoded_invInputs[8], decoded_invInputs[9], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[3], io_req_0_bits_flow_egress_node[3], decoded_invInputs[7], decoded_invInputs[8], decoded_invInputs[9], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[3], io_req_0_bits_flow_egress_node[3], decoded_invInputs[7], decoded_invInputs[8], decoded_invInputs[9], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], decoded_invInputs[20]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_0_9 = |{&{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], io_req_0_bits_src_virt_id[0], decoded_invInputs[18], decoded_invInputs[19]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[2], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], io_req_0_bits_src_virt_id[0], decoded_invInputs[20]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] endmodule
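Each io_resp_*_vc_sel_* assignment above is one output column of a PLA produced by chisel3.util.experimental.decode: RouteComputer packs the (virtual channel, flow) key into a BitPat address, lists the legal output-VC bitvector for every possible flow as a TruthTable row, and calls decoder(addr, truthTable). A minimal, self-contained sketch of that API on a toy two-bit table (the patterns here are illustrative, not the routing table used above):

import chisel3._
import chisel3.util.BitPat
import chisel3.util.experimental.decode.{decoder, TruthTable}

class TinyDecoder extends Module {
  val addr = IO(Input(UInt(2.W)))
  val sel  = IO(Output(UInt(3.W)))
  // Each row maps an address pattern to an output pattern; '?' bits are
  // don't-cares. The final BitPat is the default for unlisted addresses.
  val table = TruthTable(
    Seq(
      BitPat("b00") -> BitPat("b001"),
      BitPat("b01") -> BitPat("b010"),
      BitPat("b1?") -> BitPat("b100")),
    BitPat("b???"))
  sel := decoder(addr, table)
}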
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_201( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_369 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
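The module above is the 1-bit, 3-deep, init-0 wrapper that the AsyncResetSynchronizerShiftReg companion object elaborates for clock-domain-crossing synchronization. A minimal usage sketch of how such a synchronizer is typically requested from Chisel (the module and signal names IrqSync / irq_async / irq_sync are illustrative only, not taken from the sources above):

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSync extends Module {
  val io = IO(new Bundle {
    val irq_async = Input(Bool())   // launched from another clock domain
    val irq_sync  = Output(Bool())  // safe to sample in this clock domain
  })
  // sync = 3, init = 0 elaborates an AsyncResetSynchronizerShiftReg_w1_d3_i0 wrapper,
  // which in turn instantiates one 3-deep async-reset primitive chain per input bit.
  io.irq_sync := AsyncResetSynchronizerShiftReg(io.irq_async, sync = 3, init = 0, name = Some("irq_sync"))
}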
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_230( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0, // @[Tile.scala:17:14] output io_bad_dataflow // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] wire io_bad_dataflow_0; // @[Tile.scala:16:7] PE_486 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0), .io_bad_dataflow (io_bad_dataflow_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // 
@[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // @[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] assign io_bad_dataflow = io_bad_dataflow_0; // @[Tile.scala:16:7] endmodule
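Tile_230 above is the degenerate 1x1 case of the Tile array: a single PE with every broadcast input passed straight through to the bottom and right edges. A rough instantiation sketch whose port widths line up with that Verilog; the concrete element types, Dataflow.BOTH, and max_simultaneous_matmuls = 8 are assumptions inferred from the 8-bit and 20-bit data ports, the 5-bit shift, and the 3-bit id, not taken from the actual generator configuration:

import chisel3._
import gemmini._

class OneByOneTile(implicit ev: Arithmetic[SInt]) extends Module {
  val io = IO(new Bundle {
    val a     = Input(SInt(8.W))
    val d     = Input(SInt(20.W))
    val out_c = Output(SInt(20.W))
  })
  // SInt(32.W) accType gives log2Up(32) = 5 shift bits; 8 in-flight matmuls give a 3-bit id.
  val tile = Module(new Tile(SInt(8.W), SInt(20.W), SInt(32.W),
    Dataflow.BOTH, tree_reduction = false, max_simultaneous_matmuls = 8,
    rows = 1, columns = 1))
  tile.io.in_a(0)                 := io.a
  tile.io.in_b(0)                 := 0.S
  tile.io.in_d(0)                 := io.d
  tile.io.in_control(0).dataflow  := 0.U
  tile.io.in_control(0).propagate := 0.U
  tile.io.in_control(0).shift     := 0.U
  tile.io.in_id(0)                := 0.U
  tile.io.in_last(0)              := false.B
  tile.io.in_valid(0)             := true.B
  io.out_c := tile.io.out_c(0)
}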
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_70( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0 // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] PE_326 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // 
@[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File Nodes.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends 
FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): 
TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( 
dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File RegisterRouter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes} import freechips.rocketchip.resources.{Device, Resource, ResourceBindings} import freechips.rocketchip.prci.{NoCrossing} import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter} import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno} import scala.math.min class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle { val source = UInt((sourceBits max 1).W) val size = UInt((sizeBits max 1).W) } case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra") case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => { x.size := 0.U x.source := 0.U }) /** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers. * It provides functionality for describing and outputting metdata about the registers in several formats. * It also provides a concrete implementation of a regmap function that will be used * to wire a map of internal registers associated with this node to the node's interconnect port. */ case class TLRegisterNode( address: Seq[AddressSet], device: Device, deviceKey: String = "reg/control", concurrency: Int = 0, beatBytes: Int = 4, undefZero: Boolean = true, executable: Boolean = false)( implicit valName: ValName) extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1( Seq(TLSlaveParameters.v1( address = address, resources = Seq(Resource(device, deviceKey)), executable = executable, supportsGet = TransferSizes(1, beatBytes), supportsPutPartial = TransferSizes(1, beatBytes), supportsPutFull = TransferSizes(1, beatBytes), fifoId = Some(0))), // requests are handled in order beatBytes = beatBytes, minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle { val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min) require (size >= beatBytes) address.foreach { case a => require (a.widen(size-1).base == address.head.widen(size-1).base, s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}") } // Calling this method causes the matching TL2 bundle to be // configured to route all requests to the listed RegFields. 
def regmap(mapping: RegField.Map*) = { val (bundleIn, edge) = this.in(0) val a = bundleIn.a val d = bundleIn.d val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields) val in = Wire(Decoupled(new RegMapperInput(params))) in.bits.read := a.bits.opcode === TLMessages.Get in.bits.index := edge.addr_hi(a.bits) in.bits.data := a.bits.data in.bits.mask := a.bits.mask Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match { case (lhs, rhs) => lhs :<= rhs } val a_extra = in.bits.extra(TLRegisterRouterExtra) a_extra.source := a.bits.source a_extra.size := a.bits.size // Invoke the register map builder val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*) // No flow control needed in.valid := a.valid a.ready := in.ready d.valid := out.valid out.ready := d.ready // We must restore the size to enable width adapters to work val d_extra = out.bits.extra(TLRegisterRouterExtra) d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size) // avoid a Mux on the data bus by manually overriding two fields d.bits.data := out.bits.data Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match { case (lhs, rhs) => lhs :<= rhs } d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck) // Tie off unused channels bundleIn.b.valid := false.B bundleIn.c.ready := true.B bundleIn.e.ready := true.B genRegDescsJson(mapping:_*) } def genRegDescsJson(mapping: RegField.Map*): Unit = { // Dump out the register map for documentation purposes. val base = address.head.base val baseHex = s"0x${base.toInt.toHexString}" val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}" val json = GenRegDescsAnno.serialize(base, name, mapping:_*) var suffix = 0 while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) { suffix = suffix + 1 } ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json) val module = Module.currentModule.get.asInstanceOf[RawModule] GenRegDescsAnno.anno( module, base, mapping:_*) } } /** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink. * - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers. * - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect. * - Use the mapping helper function regmap to internally fill out the space of device control registers. 
*/ trait HasTLControlRegMap { this: RegisterRouter => protected val controlNode = TLRegisterNode( address = address, device = device, deviceKey = "reg/control", concurrency = concurrency, beatBytes = beatBytes, undefZero = undefZero, executable = executable) // Externally, this helper should be used to connect the register control port to a bus val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode) // Backwards-compatibility default node accessor with no clock crossing lazy val node: TLInwardNode = controlXing(NoCrossing) // Internally, this function should be used to populate the control port with registers protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) } } File TileClockGater.scala: package chipyard.clocking import chisel3._ import chisel3.util._ import chisel3.experimental.Analog import org.chipsalliance.cde.config._ import freechips.rocketchip.subsystem._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.prci._ import freechips.rocketchip.util._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.devices.tilelink._ import freechips.rocketchip.regmapper._ import freechips.rocketchip.subsystem._ /** This node adds clock gating control registers. * If deploying on a platform which does not support clock gating, deasserting the enable * flag will generate the registers, preserving the same memory map and behavior, but will not * generate any gaters */ class TileClockGater(address: BigInt, beatBytes: Int)(implicit p: Parameters, valName: ValName) extends LazyModule { val device = new SimpleDevice(s"clock-gater", Nil) val clockNode = ClockGroupIdentityNode() val tlNode = TLRegisterNode(Seq(AddressSet(address, 4096-1)), device, "reg/control", beatBytes=beatBytes) lazy val module = new LazyModuleImp(this) { val sources = clockNode.in.head._1.member.data.toSeq val sinks = clockNode.out.head._1.member.elements.toSeq val nSinks = sinks.size val regs = (0 until nSinks).map({i => val sinkName = sinks(i)._1 val reg = withReset(sources(i).reset) { Module(new AsyncResetRegVec(w=1, init=1)) } if (sinkName.contains("tile")) { println(s"${(address+i*4).toString(16)}: Tile $sinkName clock gate") sinks(i)._2.clock := ClockGate(sources(i).clock, reg.io.q.asBool) sinks(i)._2.reset := sources(i).reset } else { sinks(i)._2 := sources(i) } reg }) tlNode.regmap((0 until nSinks).map({i => i*4 -> Seq(RegField.rwReg(1, regs(i).io)) }): _*) } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! 
require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. 
val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
module TileClockGater( // @[TileClockGater.scala:27:25] input clock, // @[TileClockGater.scala:27:25] input reset, // @[TileClockGater.scala:27:25] output auto_clock_gater_in_1_a_ready, // @[LazyModuleImp.scala:107:25] input auto_clock_gater_in_1_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_clock_gater_in_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_clock_gater_in_1_a_bits_param, // @[LazyModuleImp.scala:107:25] input [1:0] auto_clock_gater_in_1_a_bits_size, // @[LazyModuleImp.scala:107:25] input [10:0] auto_clock_gater_in_1_a_bits_source, // @[LazyModuleImp.scala:107:25] input [20:0] auto_clock_gater_in_1_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_clock_gater_in_1_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_clock_gater_in_1_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_clock_gater_in_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_clock_gater_in_1_d_ready, // @[LazyModuleImp.scala:107:25] output auto_clock_gater_in_1_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_clock_gater_in_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_clock_gater_in_1_d_bits_size, // @[LazyModuleImp.scala:107:25] output [10:0] auto_clock_gater_in_1_d_bits_source, // @[LazyModuleImp.scala:107:25] output [63:0] auto_clock_gater_in_1_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_clock_gater_in_0_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25] input auto_clock_gater_in_0_member_allClocks_uncore_reset, // @[LazyModuleImp.scala:107:25] output auto_clock_gater_out_member_allClocks_uncore_clock, // @[LazyModuleImp.scala:107:25] output auto_clock_gater_out_member_allClocks_uncore_reset // @[LazyModuleImp.scala:107:25] ); wire _out_wofireMux_T_1; // @[RegisterRouter.scala:87:24] wire _regs_0_io_q; // @[TileClockGater.scala:33:53] wire in_bits_read = auto_clock_gater_in_1_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36] wire _out_T_1 = auto_clock_gater_in_1_a_bits_address[11:3] == 9'h0; // @[RegisterRouter.scala:75:19, :87:24] assign _out_wofireMux_T_1 = ~in_bits_read; // @[RegisterRouter.scala:74:36, :87:24] wire [2:0] monitor_io_in_d_bits_opcode = {2'h0, in_bits_read}; // @[RegisterRouter.scala:74:36, :105:19] TLMonitor_56 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (auto_clock_gater_in_1_d_ready), .io_in_a_valid (auto_clock_gater_in_1_a_valid), .io_in_a_bits_opcode (auto_clock_gater_in_1_a_bits_opcode), .io_in_a_bits_param (auto_clock_gater_in_1_a_bits_param), .io_in_a_bits_size (auto_clock_gater_in_1_a_bits_size), .io_in_a_bits_source (auto_clock_gater_in_1_a_bits_source), .io_in_a_bits_address (auto_clock_gater_in_1_a_bits_address), .io_in_a_bits_mask (auto_clock_gater_in_1_a_bits_mask), .io_in_a_bits_corrupt (auto_clock_gater_in_1_a_bits_corrupt), .io_in_d_ready (auto_clock_gater_in_1_d_ready), .io_in_d_valid (auto_clock_gater_in_1_a_valid), .io_in_d_bits_opcode (monitor_io_in_d_bits_opcode), // @[RegisterRouter.scala:105:19] .io_in_d_bits_size (auto_clock_gater_in_1_a_bits_size), .io_in_d_bits_source (auto_clock_gater_in_1_a_bits_source) ); // @[Nodes.scala:27:25] AsyncResetRegVec_w1_i1 regs_0 ( // @[TileClockGater.scala:33:53] .clock (clock), .reset (auto_clock_gater_in_0_member_allClocks_uncore_reset), .io_d (auto_clock_gater_in_1_a_bits_data[0]), // @[RegisterRouter.scala:87:24] .io_q (_regs_0_io_q), .io_en (auto_clock_gater_in_1_a_valid & auto_clock_gater_in_1_d_ready & _out_wofireMux_T_1 & _out_T_1 & 
auto_clock_gater_in_1_a_bits_mask[0]) // @[RegisterRouter.scala:87:24] ); // @[TileClockGater.scala:33:53] assign auto_clock_gater_in_1_a_ready = auto_clock_gater_in_1_d_ready; // @[TileClockGater.scala:27:25] assign auto_clock_gater_in_1_d_valid = auto_clock_gater_in_1_a_valid; // @[TileClockGater.scala:27:25] assign auto_clock_gater_in_1_d_bits_opcode = monitor_io_in_d_bits_opcode; // @[RegisterRouter.scala:105:19] assign auto_clock_gater_in_1_d_bits_size = auto_clock_gater_in_1_a_bits_size; // @[TileClockGater.scala:27:25] assign auto_clock_gater_in_1_d_bits_source = auto_clock_gater_in_1_a_bits_source; // @[TileClockGater.scala:27:25] assign auto_clock_gater_in_1_d_bits_data = {63'h0, _out_T_1 & _regs_0_io_q}; // @[RegisterRouter.scala:87:24] assign auto_clock_gater_out_member_allClocks_uncore_clock = auto_clock_gater_in_0_member_allClocks_uncore_clock; // @[TileClockGater.scala:27:25] assign auto_clock_gater_out_member_allClocks_uncore_reset = auto_clock_gater_in_0_member_allClocks_uncore_reset; // @[TileClockGater.scala:27:25] endmodule
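The TileClockGater instance above collapses to a single read/write bit at offset 0x0 because this configuration exposes one gateable clock sink. The same TLRegisterNode.regmap pattern in isolation, as a rough sketch (SingleGateCtrl and its parameters are illustrative and not part of the sources above):

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.regmapper.RegField
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util.AsyncResetRegVec

class SingleGateCtrl(address: BigInt, beatBytes: Int)(implicit p: Parameters) extends LazyModule {
  val device = new SimpleDevice("clock-gater", Nil)
  val tlNode = TLRegisterNode(Seq(AddressSet(address, 4096 - 1)), device, "reg/control", beatBytes = beatBytes)
  lazy val module = new LazyModuleImp(this) {
    // One async-reset enable bit that comes out of reset set to 1 (gate open).
    val en = Module(new AsyncResetRegVec(w = 1, init = 1))
    // Offset 0x0: reads return the current enable; writes update it when the
    // write-fire term (a_valid & d_ready & !read & address-match & mask[0],
    // visible as io_en in the Verilog above) is asserted.
    tlNode.regmap(0 -> Seq(RegField.rwReg(1, en.io)))
  }
}

A node like this only produces hardware once it is diplomatically connected to a TileLink bus inside an enclosing LazyModule, which is how TileClockGater's tlNode is used as well.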
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_209( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_226 io_out_source_valid_1 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
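For reference, the AsyncQueueSource/AsyncQueueSink above exchange Gray-coded write/read indices so that at most one bit changes per increment across the clock crossing, which keeps the synchronized widx/ridx comparison glitch-free. The following is a minimal plain-Scala sketch (not part of the prompt files or the generated RTL; the object and helper names are illustrative only) that mirrors the `incremented ^ (incremented >> 1)` conversion from `GrayCounter` and checks the one-bit-change property.

object GrayModel {
  // Mirrors GrayCounter's binary-to-Gray conversion: incremented ^ (incremented >> 1).
  def toGray(binary: Int): Int = binary ^ (binary >>> 1)

  def main(args: Array[String]): Unit = {
    val bits = 4 // an AsyncQueue of depth 8 carries log2(8) + 1 = 4 index bits
    val codes = (0 until (1 << bits)).map(toGray)
    // Successive codes (including the wrap-around) differ in exactly one bit,
    // which is what makes the synchronized index comparison safe to resolve.
    codes.zip(codes.tail :+ codes.head).foreach { case (a, b) =>
      assert(Integer.bitCount(a ^ b) == 1)
    }
    println(codes.mkString(" "))
  }
}

Running this prints 0 1 3 2 6 7 5 4 12 13 15 14 10 11 9 8, the 4-bit Gray sequence that the widx/ridx registers step through.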
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
module PE_258( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid, // @[PE.scala:35:14] output io_bad_dataflow // @[PE.scala:35:14] ); wire [19:0] _mac_unit_io_out_d; // @[PE.scala:64:24] wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow_0 = 1'h0; // @[PE.scala:31:7] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire [19:0] c1_lo_1 = io_in_d_0; // @[PE.scala:31:7] wire [19:0] c2_lo_1 = io_in_d_0; // @[PE.scala:31:7] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [31:0] c1; // @[PE.scala:70:15] wire [31:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [31:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [31:0] c2; // @[PE.scala:71:15] wire [31:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [31:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [31:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = _io_out_c_zeros_T_1 & _io_out_c_zeros_T_6; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [31:0] _GEN_2 = {27'h0, shift_offset}; // @[PE.scala:91:25] wire [31:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [31:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [31:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [32:0] _io_out_c_T_2 = {_io_out_c_T[31], _io_out_c_T} + {{31{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [31:0] _io_out_c_T_3 = _io_out_c_T_2[31:0]; // @[Arithmetic.scala:107:28] wire [31:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire _io_out_c_T_5 = $signed(_io_out_c_T_4) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33] wire _io_out_c_T_6 = $signed(_io_out_c_T_4) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60] wire [31:0] _io_out_c_T_7 = _io_out_c_T_6 ? 32'hFFF80000 : _io_out_c_T_4; // @[Mux.scala:126:16] wire [31:0] _io_out_c_T_8 = _io_out_c_T_5 ? 32'h7FFFF : _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8[19:0]; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire c1_sign = io_in_d_0[19]; // @[PE.scala:31:7] wire c2_sign = io_in_d_0[19]; // @[PE.scala:31:7] wire [1:0] _GEN_4 = {2{c1_sign}}; // @[Arithmetic.scala:117:26, :118:18] wire [1:0] c1_lo_lo_hi; // @[Arithmetic.scala:118:18] assign c1_lo_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_lo_hi_hi; // @[Arithmetic.scala:118:18] assign c1_lo_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_hi_lo_hi; // @[Arithmetic.scala:118:18] assign c1_hi_lo_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [1:0] c1_hi_hi_hi; // @[Arithmetic.scala:118:18] assign c1_hi_hi_hi = _GEN_4; // @[Arithmetic.scala:118:18] wire [2:0] c1_lo_lo = {c1_lo_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c1_lo_hi = {c1_lo_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c1_lo = {c1_lo_hi, c1_lo_lo}; // @[Arithmetic.scala:118:18] wire [2:0] c1_hi_lo = {c1_hi_lo_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c1_hi_hi = {c1_hi_hi_hi, c1_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c1_hi = {c1_hi_hi, c1_hi_lo}; // @[Arithmetic.scala:118:18] wire [11:0] _c1_T = {c1_hi, c1_lo}; // @[Arithmetic.scala:118:18] wire [31:0] _c1_T_1 = {_c1_T, c1_lo_1}; // @[Arithmetic.scala:118:{14,18}] wire [31:0] _c1_T_2 = _c1_T_1; // @[Arithmetic.scala:118:{14,61}] wire [31:0] _c1_WIRE = _c1_T_2; // @[Arithmetic.scala:118:61] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [31:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); 
// @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = _io_out_c_zeros_T_10 & _io_out_c_zeros_T_15; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [31:0] _GEN_5 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [31:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_5; // @[Arithmetic.scala:103:30] wire [31:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_5; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [32:0] _io_out_c_T_13 = {_io_out_c_T_11[31], _io_out_c_T_11} + {{31{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [31:0] _io_out_c_T_14 = _io_out_c_T_13[31:0]; // @[Arithmetic.scala:107:28] wire [31:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire _io_out_c_T_16 = $signed(_io_out_c_T_15) > 32'sh7FFFF; // @[Arithmetic.scala:107:28, :125:33] wire _io_out_c_T_17 = $signed(_io_out_c_T_15) < -32'sh80000; // @[Arithmetic.scala:107:28, :125:60] wire [31:0] _io_out_c_T_18 = _io_out_c_T_17 ? 32'hFFF80000 : _io_out_c_T_15; // @[Mux.scala:126:16] wire [31:0] _io_out_c_T_19 = _io_out_c_T_16 ? 
32'h7FFFF : _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19[19:0]; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [1:0] _GEN_6 = {2{c2_sign}}; // @[Arithmetic.scala:117:26, :118:18] wire [1:0] c2_lo_lo_hi; // @[Arithmetic.scala:118:18] assign c2_lo_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_lo_hi_hi; // @[Arithmetic.scala:118:18] assign c2_lo_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_hi_lo_hi; // @[Arithmetic.scala:118:18] assign c2_hi_lo_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [1:0] c2_hi_hi_hi; // @[Arithmetic.scala:118:18] assign c2_hi_hi_hi = _GEN_6; // @[Arithmetic.scala:118:18] wire [2:0] c2_lo_lo = {c2_lo_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c2_lo_hi = {c2_lo_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c2_lo = {c2_lo_hi, c2_lo_lo}; // @[Arithmetic.scala:118:18] wire [2:0] c2_hi_lo = {c2_hi_lo_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [2:0] c2_hi_hi = {c2_hi_hi_hi, c2_sign}; // @[Arithmetic.scala:117:26, :118:18] wire [5:0] c2_hi = {c2_hi_hi, c2_hi_lo}; // @[Arithmetic.scala:118:18] wire [11:0] _c2_T = {c2_hi, c2_lo}; // @[Arithmetic.scala:118:18] wire [31:0] _c2_T_1 = {_c2_T, c2_lo_1}; // @[Arithmetic.scala:118:{14,18}] wire [31:0] _c2_T_2 = _c2_T_1; // @[Arithmetic.scala:118:{14,61}] wire [31:0] _c2_WIRE = _c2_T_2; // @[Arithmetic.scala:118:61] wire [31:0] _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5[7:0]; // @[PE.scala:121:38] wire [31:0] _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7[7:0]; // @[PE.scala:127:38] assign io_out_c_0 = io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? c1[19:0] : c2[19:0]) : io_in_control_propagate_0 ? _io_out_c_T_10 : _io_out_c_T_21; // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :104:16, :111:16, :118:101, :119:30, :120:16, :126:16] assign io_out_b_0 = io_in_control_dataflow_0 ? _mac_unit_io_out_d : io_in_b_0; // @[PE.scala:31:7, :64:24, :102:95, :103:30, :118:101] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] wire [31:0] _GEN_7 = {{12{io_in_d_0[19]}}, io_in_d_0}; // @[PE.scala:31:7, :124:10] wire [31:0] _GEN_8 = {{12{_mac_unit_io_out_d[19]}}, _mac_unit_io_out_d}; // @[PE.scala:64:24, :108:10] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0) begin // @[PE.scala:31:7] if (io_in_control_dataflow_0) begin // @[PE.scala:31:7] if (io_in_control_dataflow_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :70:15, :118:101, :119:30, :124:10] c1 <= _GEN_7; // @[PE.scala:70:15, :124:10] if (~io_in_control_dataflow_0 | io_in_control_propagate_0) begin // @[PE.scala:31:7, :71:15, :118:101, :119:30] end else // @[PE.scala:71:15, :118:101, :119:30] c2 <= _GEN_7; // @[PE.scala:71:15, :124:10] end else begin // @[PE.scala:31:7] c1 <= io_in_control_propagate_0 ? _c1_WIRE : _GEN_8; // @[PE.scala:31:7, :70:15, :103:30, :108:10, :109:10, :115:10] c2 <= io_in_control_propagate_0 ? 
_GEN_8 : _c2_WIRE; // @[PE.scala:31:7, :71:15, :103:30, :108:10, :116:10] end last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] end always @(posedge) MacUnit_2 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_dataflow_0 ? (io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3) : io_in_control_propagate_0 ? _mac_unit_io_in_b_WIRE : _mac_unit_io_in_b_WIRE_1), // @[PE.scala:31:7, :102:95, :103:30, :106:{24,37}, :113:{24,37}, :118:101, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_control_dataflow_0 ? {{12{io_in_b_0[19]}}, io_in_b_0} : io_in_control_propagate_0 ? c2 : c1), // @[PE.scala:31:7, :70:15, :71:15, :102:95, :103:30, :107:24, :114:24, :118:101, :122:24] .io_out_d (_mac_unit_io_out_d) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] assign io_bad_dataflow = io_bad_dataflow_0; // @[PE.scala:31:7] endmodule
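For reference, the io_out_c_point_five / io_out_c_zeros / io_out_c_ones_digit wires in PE_258 above come from the rounding right-shift `>>` in Arithmetic.scala. Below is a minimal plain-Scala sketch (not part of the prompt files or the generated RTL; names and test values are illustrative only) that models that rounding; the combination point_five & (zeros | ones_digit) matches the round-to-nearest-even (rne) row of the RVV vxrm table referenced in the source comment.

object RoundingShiftModel {
  // point_five / zeros / ones_digit as in SIntArithmetic.>> and the io_out_c_* wires above.
  def roundShift(x: Int, u: Int): Int = {
    val pointFive = u != 0 && ((x >> (u - 1)) & 1) == 1
    val zeros     = u > 1 && (x & ((1 << (u - 1)) - 1)) != 0
    val onesDigit = ((x >> u) & 1) == 1
    val r         = pointFive && (zeros || onesDigit)
    (x >> u) + (if (r) 1 else 0)
  }

  def main(args: Array[String]): Unit = {
    // 13/4 = 3.25 -> 3, 14/4 = 3.5 -> 4 (tie rounds to even), 15/4 = 3.75 -> 4, -13/4 = -3.25 -> -3
    Seq((13, 2), (14, 2), (15, 2), (-13, 2)).foreach { case (x, u) =>
      println(s"($x >> $u) rounded = ${roundShift(x, u)}")
    }
  }
}

Note that in PE.scala the shift amount is gated by flip (shift_offset is zero unless the propagate flag just toggled), so this rounding only takes effect on the cycle where an accumulated result is drained out of c1 or c2.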
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
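  // Note (illustrative, not part of the original Monitor): `inflight`, `inflight_opcodes` and
  // `inflight_sizes` form a per-source scoreboard. `inflight` is a one-bit-per-source bitmap, set on
  // the first beat of an accepted 'A' request and cleared on the first beat of the matching 'D'
  // response (ReleaseAck excluded), while the opcode/size vectors pack an (opcode|1) / (size|1)
  // field per source at offset `source << log_a_*_bus_size`, the trailing 1 acting as a valid bit.
  // A pure-Scala sketch of the same pack/lookup, assuming log_a_opcode_bus_size = 2:
  //   def pack(vec: BigInt, source: Int, opcode: Int): BigInt =
  //     vec | (BigInt((opcode << 1) | 1) << (source << 2))        // set field plus valid bit
  //   def lookup(vec: BigInt, source: Int): Int =
  //     (((vec >> (source << 2)) & 0xf) >> 1).toInt               // mask field, drop valid bit
  //   e.g. pack(0, source = 2, opcode = 4 /* Get */) == 0x900 and lookup(0x900, 2) == 4,
  //   mirroring a_opcodes_set and a_opcode_lookup above.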
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
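      // Illustrative note (not in the original): the UInt-amount rotate below is a log-depth
      // barrel shifter -- stage i rotates by the constant 1 << i and is selected by bit i of the
      // zero-padded amount. A pure-Scala model of the same recurrence, for a power-of-two width w:
      //   def rotr1(v: BigInt, k: Int, w: Int): BigInt =
      //     ((v >> (k % w)) | (v << (w - (k % w)))) & ((BigInt(1) << w) - 1)
      //   def rotr(v: BigInt, n: Int, w: Int, stages: Int): BigInt =
      //     (0 until stages).foldLeft(v)((r, i) => if (((n >> i) & 1) == 1) rotr1(r, 1 << i, w) else r)
      // e.g. rotr(0x96, 5, 8, 3) == 0xb4: rotate by 4 (0x96 -> 0x69), then by 1 (0x69 -> 0xb4).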
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
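        // (Illustrative note, not in the original: dDataYes/dDataNo let hasData() constant-fold.
        //  If a manager port can only ever produce data-bearing D messages -- e.g. it supports Get
        //  but none of PutFull/PutPartial/Hint/AcquireT -- staticHasData returns Some(true) and the
        //  per-beat opcode decode is dropped; if it can only produce data-less ones it returns
        //  Some(false); otherwise it returns None and hasData() falls back to decoding d.opcode(0).)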
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
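  // (Added note, not in the original: inFlight() watches first/last beats on all five channels and
  //  keeps a running count of outstanding operations, sized for up to 3*endSourceId in flight; it
  //  returns both the registered count and its next value. An illustrative use, assuming a TLBundle
  //  `tl` attached to this edge, is gating a quiescence check:
  //    val (flight, nextFlight) = edge.inFlight(tl)
  //    val busIdle = flight === 0.U && nextFlight === 0.U
  //  Prefer per-source-ID tracking where possible; this circuit PopCounts across A/B/C/D/E each cycle.)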
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
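    // A permissions-only Release carries no payload, so the data beat is left unconnected and
    // corrupt is tied low; the ReleaseData variant defined next takes explicit data/corrupt
    // arguments instead. (Descriptive comment added for clarity; not in the original file.)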
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
module TLMonitor_28( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [8:0] a_first_counter; // @[Edges.scala:229:27] reg [28:0] address; // @[Monitor.scala:391:22] reg [8:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [1:0] inflight; // @[Monitor.scala:614:27] reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [7:0] inflight_sizes; // @[Monitor.scala:618:33] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire a_set = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire _GEN = io_in_d_valid & d_first_1; // @[Monitor.scala:674:26] wire _GEN_0 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:36:7, :673:46, :674:74] wire d_clr = _GEN & _GEN_0; // @[Monitor.scala:673:46, :674:{26,71,74}] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [1:0] inflight_1; // @[Monitor.scala:726:35] reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire d_clr_1 = io_in_d_valid & d_first_2 & io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:36:7, :673:46, :784:26, :788:70] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
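The inFlight helper near the top of the TLEdge excerpt above keeps one running count of outstanding transactions: it adds the first beats of fired requests and subtracts the last beats of fired responses across the TileLink channels. Below is a minimal standalone sketch of that counting idea, reduced to a single request/response pair; the module and port names (InFlightCounter, req_fire, resp_fire) are illustrative only and not part of rocket-chip.

import chisel3._
import chisel3.util._

// Minimal sketch of the in-flight counting idea used by TLEdge.inFlight:
// count up on the first beat of an accepted request, count down on the
// last beat of an accepted response.
class InFlightCounter(maxInFlight: Int) extends Module {
  val io = IO(new Bundle {
    val req_fire  = Input(Bool())                          // first beat of a request fired this cycle
    val resp_fire = Input(Bool())                          // last beat of a response fired this cycle
    val flight    = Output(UInt(log2Ceil(maxInFlight + 1).W))
  })
  val flight = RegInit(0.U(log2Ceil(maxInFlight + 1).W))
  flight := flight + io.req_fire.asUInt - io.resp_fire.asUInt
  io.flight := flight
  // A response with nothing outstanding would underflow the counter; flag it as a monitor would.
  assert(!(io.resp_fire && flight === 0.U), "response fired with no outstanding request")
}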
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: package constellation.channel import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.util._ import constellation.noc.{HasNoCParams} class NoCMonitor(val cParam: ChannelParams)(implicit val p: Parameters) extends Module with HasNoCParams { val io = IO(new Bundle { val in = Input(new Channel(cParam)) }) val in_flight = RegInit(VecInit(Seq.fill(cParam.nVirtualChannels) { false.B })) for (i <- 0 until cParam.srcSpeedup) { val flit = io.in.flit(i) when (flit.valid) { when (flit.bits.head) { in_flight(flit.bits.virt_channel_id) := true.B assert (!in_flight(flit.bits.virt_channel_id), "Flit head/tail sequencing is broken") } when (flit.bits.tail) { in_flight(flit.bits.virt_channel_id) := false.B } } val possibleFlows = cParam.possibleFlows when (flit.valid && flit.bits.head) { cParam match { case n: ChannelParams => n.virtualChannelParams.zipWithIndex.foreach { case (v,i) => assert(flit.bits.virt_channel_id =/= i.U || v.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } case _ => assert(cParam.possibleFlows.toSeq.map(_.isFlow(flit.bits.flow)).orR) } } } } File Types.scala: package constellation.routing import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Parameters} import constellation.noc.{HasNoCParams} import constellation.channel.{Flit} /** A representation for 1 specific virtual channel in wormhole routing * * @param src the source node * @param vc ID for the virtual channel * @param dst the destination node * @param n_vc the number of virtual channels */ // BEGIN: ChannelRoutingInfo case class ChannelRoutingInfo( src: Int, dst: Int, vc: Int, n_vc: Int ) { // END: ChannelRoutingInfo require (src >= -1 && dst >= -1 && vc >= 0, s"Illegal $this") require (!(src == -1 && dst == -1), s"Illegal $this") require (vc < n_vc, s"Illegal $this") val isIngress = src == -1 val isEgress = dst == -1 } /** Represents the properties of a packet that are relevant for routing * ingressId and egressId uniquely identify a flow, but vnet and dst are used here * to simplify the implementation of routingrelations * * @param ingressId packet's source ingress point * @param egressId packet's destination egress point * @param vNet virtual subnetwork identifier * @param dst packet's destination node ID */ // BEGIN: FlowRoutingInfo case class FlowRoutingInfo( ingressId: Int, egressId: Int, vNetId: Int, ingressNode: Int, ingressNodeId: Int, egressNode: Int, egressNodeId: Int, fifo: Boolean ) { // END: FlowRoutingInfo def isFlow(f: FlowRoutingBundle): Bool = { (f.ingress_node === ingressNode.U && f.egress_node === egressNode.U && f.ingress_node_id === ingressNodeId.U && f.egress_node_id === egressNodeId.U) } def asLiteral(b: FlowRoutingBundle): BigInt = { Seq( (vNetId , b.vnet_id), (ingressNode , b.ingress_node), (ingressNodeId , b.ingress_node_id), (egressNode , b.egress_node), (egressNodeId , b.egress_node_id) ).foldLeft(0)((l, t) => { (l << t._2.getWidth) | t._1 }) } } class FlowRoutingBundle(implicit val p: Parameters) extends Bundle with HasNoCParams { // Instead of tracking ingress/egress ID, track the physical destination id and the offset at the destination // This simplifies the routing tables val vnet_id = UInt(log2Ceil(nVirtualNetworks).W) val ingress_node = UInt(log2Ceil(nNodes).W) val ingress_node_id = UInt(log2Ceil(maxIngressesAtNode).W) val egress_node = UInt(log2Ceil(nNodes).W) val 
egress_node_id = UInt(log2Ceil(maxEgressesAtNode).W) }
module NoCMonitor_31( // @[Monitor.scala:11:7] input clock, // @[Monitor.scala:11:7] input reset, // @[Monitor.scala:11:7] input io_in_flit_0_valid, // @[Monitor.scala:12:14] input io_in_flit_0_bits_head, // @[Monitor.scala:12:14] input io_in_flit_0_bits_tail, // @[Monitor.scala:12:14] input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[Monitor.scala:12:14] input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[Monitor.scala:12:14] input [3:0] io_in_flit_0_bits_flow_egress_node, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[Monitor.scala:12:14] input [1:0] io_in_flit_0_bits_virt_channel_id // @[Monitor.scala:12:14] ); reg in_flight_0; // @[Monitor.scala:16:26] reg in_flight_1; // @[Monitor.scala:16:26] reg in_flight_2; // @[Monitor.scala:16:26]
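NoCMonitor_31 above realizes the head/tail sequencing check from the Monitor.scala source: a virtual channel is marked in flight when a head flit arrives and is released by a tail flit, and a second head before the tail trips the assertion. The following is a reduced standalone sketch of that check with illustrative names (HeadTailChecker, vc) rather than constellation's Channel/Flit bundles.

import chisel3._
import chisel3.util._

// Per-virtual-channel head/tail sequencing check, mirroring the in_flight vector in NoCMonitor.
class HeadTailChecker(nVirtualChannels: Int) extends Module {
  val io = IO(new Bundle {
    val valid = Input(Bool())
    val head  = Input(Bool())
    val tail  = Input(Bool())
    val vc    = Input(UInt(log2Ceil(nVirtualChannels).max(1).W))
  })
  val inFlight = RegInit(VecInit(Seq.fill(nVirtualChannels)(false.B)))
  when (io.valid) {
    when (io.head) {
      // A head flit must not arrive while the previous packet on this VC is still open.
      assert(!inFlight(io.vc), "head/tail sequencing is broken on this virtual channel")
      inFlight(io.vc) := true.B
    }
    // The tail connection comes last, so a single-flit packet (head && tail) ends up not in flight.
    when (io.tail) { inFlight(io.vc) := false.B }
  }
}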
Generate the Verilog code corresponding to the following Chisel files. File MulRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (ported from Verilog to Chisel by Andrew Waterman). Copyright 2019, 2020 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulFullRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth*2 - 1)) }) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ val notSigNaN_invalidExc = (io.a.isInf && io.b.isZero) || (io.a.isZero && io.b.isInf) val notNaN_isInfOut = io.a.isInf || io.b.isInf val notNaN_isZeroOut = io.a.isZero || io.b.isZero val notNaN_signOut = io.a.sign ^ io.b.sign val common_sExpOut = io.a.sExp + io.b.sExp - (1<<expWidth).S val common_sigOut = (io.a.sig * io.b.sig)(sigWidth*2 - 1, 0) /*------------------------------------------------------------------------ *------------------------------------------------------------------------*/ io.invalidExc := isSigNaNRawFloat(io.a) || isSigNaNRawFloat(io.b) || notSigNaN_invalidExc io.rawOut.isInf := notNaN_isInfOut io.rawOut.isZero := notNaN_isZeroOut io.rawOut.sExp := common_sExpOut io.rawOut.isNaN := io.a.isNaN || io.b.isNaN io.rawOut.sign := notNaN_signOut io.rawOut.sig := common_sigOut } class MulRawFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(new RawFloat(expWidth, sigWidth)) val b = Input(new RawFloat(expWidth, sigWidth)) val invalidExc = Output(Bool()) val rawOut = Output(new RawFloat(expWidth, sigWidth + 2)) }) val mulFullRaw = Module(new MulFullRawFN(expWidth, sigWidth)) mulFullRaw.io.a := io.a mulFullRaw.io.b := io.b io.invalidExc := mulFullRaw.io.invalidExc io.rawOut := mulFullRaw.io.rawOut io.rawOut.sig := { val sig = mulFullRaw.io.rawOut.sig Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR) } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class MulRecFN(expWidth: Int, sigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val a = Input(UInt((expWidth + sigWidth + 1).W)) val b = Input(UInt((expWidth + sigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(Bool()) val out = Output(UInt((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(UInt(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val mulRawFN = Module(new MulRawFN(expWidth, sigWidth)) mulRawFN.io.a := rawFloatFromRecFN(expWidth, sigWidth, io.a) mulRawFN.io.b := rawFloatFromRecFN(expWidth, sigWidth, io.b) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundRawFNToRecFN = Module(new RoundRawFNToRecFN(expWidth, sigWidth, 0)) roundRawFNToRecFN.io.invalidExc := mulRawFN.io.invalidExc roundRawFNToRecFN.io.infiniteExc := false.B roundRawFNToRecFN.io.in := mulRawFN.io.rawOut roundRawFNToRecFN.io.roundingMode := io.roundingMode roundRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundRawFNToRecFN.io.out io.exceptionFlags := 
roundRawFNToRecFN.io.exceptionFlags }
module MulRawFN_34( // @[MulRecFN.scala:75:7] input io_a_isNaN, // @[MulRecFN.scala:77:16] input io_a_isInf, // @[MulRecFN.scala:77:16] input io_a_isZero, // @[MulRecFN.scala:77:16] input io_a_sign, // @[MulRecFN.scala:77:16] input [9:0] io_a_sExp, // @[MulRecFN.scala:77:16] input [24:0] io_a_sig, // @[MulRecFN.scala:77:16] input io_b_isNaN, // @[MulRecFN.scala:77:16] input io_b_isInf, // @[MulRecFN.scala:77:16] input io_b_isZero, // @[MulRecFN.scala:77:16] input io_b_sign, // @[MulRecFN.scala:77:16] input [9:0] io_b_sExp, // @[MulRecFN.scala:77:16] input [24:0] io_b_sig, // @[MulRecFN.scala:77:16] output io_invalidExc, // @[MulRecFN.scala:77:16] output io_rawOut_isNaN, // @[MulRecFN.scala:77:16] output io_rawOut_isInf, // @[MulRecFN.scala:77:16] output io_rawOut_isZero, // @[MulRecFN.scala:77:16] output io_rawOut_sign, // @[MulRecFN.scala:77:16] output [9:0] io_rawOut_sExp, // @[MulRecFN.scala:77:16] output [26:0] io_rawOut_sig // @[MulRecFN.scala:77:16] ); wire [47:0] _mulFullRaw_io_rawOut_sig; // @[MulRecFN.scala:84:28] wire io_a_isNaN_0 = io_a_isNaN; // @[MulRecFN.scala:75:7] wire io_a_isInf_0 = io_a_isInf; // @[MulRecFN.scala:75:7] wire io_a_isZero_0 = io_a_isZero; // @[MulRecFN.scala:75:7] wire io_a_sign_0 = io_a_sign; // @[MulRecFN.scala:75:7] wire [9:0] io_a_sExp_0 = io_a_sExp; // @[MulRecFN.scala:75:7] wire [24:0] io_a_sig_0 = io_a_sig; // @[MulRecFN.scala:75:7] wire io_b_isNaN_0 = io_b_isNaN; // @[MulRecFN.scala:75:7] wire io_b_isInf_0 = io_b_isInf; // @[MulRecFN.scala:75:7] wire io_b_isZero_0 = io_b_isZero; // @[MulRecFN.scala:75:7] wire io_b_sign_0 = io_b_sign; // @[MulRecFN.scala:75:7] wire [9:0] io_b_sExp_0 = io_b_sExp; // @[MulRecFN.scala:75:7] wire [24:0] io_b_sig_0 = io_b_sig; // @[MulRecFN.scala:75:7] wire [26:0] _io_rawOut_sig_T_3; // @[MulRecFN.scala:93:10] wire io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7] wire io_rawOut_isInf_0; // @[MulRecFN.scala:75:7] wire io_rawOut_isZero_0; // @[MulRecFN.scala:75:7] wire io_rawOut_sign_0; // @[MulRecFN.scala:75:7] wire [9:0] io_rawOut_sExp_0; // @[MulRecFN.scala:75:7] wire [26:0] io_rawOut_sig_0; // @[MulRecFN.scala:75:7] wire io_invalidExc_0; // @[MulRecFN.scala:75:7] wire [25:0] _io_rawOut_sig_T = _mulFullRaw_io_rawOut_sig[47:22]; // @[MulRecFN.scala:84:28, :93:15] wire [21:0] _io_rawOut_sig_T_1 = _mulFullRaw_io_rawOut_sig[21:0]; // @[MulRecFN.scala:84:28, :93:37] wire _io_rawOut_sig_T_2 = |_io_rawOut_sig_T_1; // @[MulRecFN.scala:93:{37,55}] assign _io_rawOut_sig_T_3 = {_io_rawOut_sig_T, _io_rawOut_sig_T_2}; // @[MulRecFN.scala:93:{10,15,55}] assign io_rawOut_sig_0 = _io_rawOut_sig_T_3; // @[MulRecFN.scala:75:7, :93:10] MulFullRawFN_34 mulFullRaw ( // @[MulRecFN.scala:84:28] .io_a_isNaN (io_a_isNaN_0), // @[MulRecFN.scala:75:7] .io_a_isInf (io_a_isInf_0), // @[MulRecFN.scala:75:7] .io_a_isZero (io_a_isZero_0), // @[MulRecFN.scala:75:7] .io_a_sign (io_a_sign_0), // @[MulRecFN.scala:75:7] .io_a_sExp (io_a_sExp_0), // @[MulRecFN.scala:75:7] .io_a_sig (io_a_sig_0), // @[MulRecFN.scala:75:7] .io_b_isNaN (io_b_isNaN_0), // @[MulRecFN.scala:75:7] .io_b_isInf (io_b_isInf_0), // @[MulRecFN.scala:75:7] .io_b_isZero (io_b_isZero_0), // @[MulRecFN.scala:75:7] .io_b_sign (io_b_sign_0), // @[MulRecFN.scala:75:7] .io_b_sExp (io_b_sExp_0), // @[MulRecFN.scala:75:7] .io_b_sig (io_b_sig_0), // @[MulRecFN.scala:75:7] .io_invalidExc (io_invalidExc_0), .io_rawOut_isNaN (io_rawOut_isNaN_0), .io_rawOut_isInf (io_rawOut_isInf_0), .io_rawOut_isZero (io_rawOut_isZero_0), .io_rawOut_sign (io_rawOut_sign_0), .io_rawOut_sExp (io_rawOut_sExp_0), 
.io_rawOut_sig (_mulFullRaw_io_rawOut_sig) ); // @[MulRecFN.scala:84:28] assign io_invalidExc = io_invalidExc_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isNaN = io_rawOut_isNaN_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isInf = io_rawOut_isInf_0; // @[MulRecFN.scala:75:7] assign io_rawOut_isZero = io_rawOut_isZero_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sign = io_rawOut_sign_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sExp = io_rawOut_sExp_0; // @[MulRecFN.scala:75:7] assign io_rawOut_sig = io_rawOut_sig_0; // @[MulRecFN.scala:75:7] endmodule
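The 27-bit io_rawOut_sig in MulRawFN_34 comes from the Cat(sig >> (sigWidth - 2), sig(sigWidth - 3, 0).orR) narrowing in MulRawFN: with sigWidth = 24, the 48-bit full product keeps its top 26 bits and folds the discarded low 22 bits into one sticky bit, which is exactly the sig[47:22] and |sig[21:0] terms in the Verilog above. Here is a small sketch of just that narrowing step, with an illustrative module name and the single-precision widths hard-coded.

import chisel3._
import chisel3.util._

// Sticky-bit narrowing of a full significand product, as done between MulFullRawFN and MulRawFN.
class StickyNarrow extends Module {
  val sigWidth = 24                                        // single precision
  val io = IO(new Bundle {
    val fullSig  = Input(UInt((2 * sigWidth).W))           // 48-bit raw product
    val narrowed = Output(UInt((sigWidth + 3).W))          // 27 bits: 26 MSBs plus one sticky bit
  })
  // Keep the top sigWidth + 2 bits; OR-reduce the low sigWidth - 2 bits into one sticky bit.
  io.narrowed := Cat(io.fullSig >> (sigWidth - 2), io.fullSig(sigWidth - 3, 0).orR)
}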
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_169( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_305 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
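AsyncResetSynchronizerShiftReg_w1_d3_i0_169 above is the 1-bit-wide, 3-deep, reset-to-0 chain that the AsyncResetSynchronizerShiftReg object elaborates. Below is a usage sketch; the wrapper module and signal names are illustrative, and only the AsyncResetSynchronizerShiftReg.apply call itself comes from the source shown above.

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

// Synchronize one bit from another clock domain through a 3-deep async-reset chain.
class AsyncInputSync extends Module {
  val io = IO(new Bundle {
    val async_in = Input(Bool())    // driven from a different clock domain
    val sync_out = Output(Bool())   // safe to sample in this module's clock domain
  })
  // Arguments: input, sync depth, init value, suggested instance name.
  // Elaborates an AsyncResetSynchronizerShiftReg_w1_d3_i0 instance like the module above.
  io.sync_out := AsyncResetSynchronizerShiftReg(io.async_in, 3, 0, Some("input_sync"))
}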
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File PMP.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.rocket import chisel3._ import chisel3.util.{Cat, log2Ceil} import org.chipsalliance.cde.config._ import freechips.rocketchip.tile._ import freechips.rocketchip.util._ class PMPConfig extends Bundle { val l = Bool() val res = UInt(2.W) val a = UInt(2.W) val x = Bool() val w = Bool() val r = Bool() } object PMP { def lgAlign = 2 def apply(reg: PMPReg): PMP = { val pmp = Wire(new PMP()(reg.p)) pmp.cfg := reg.cfg pmp.addr := reg.addr pmp.mask := pmp.computeMask pmp } } class PMPReg(implicit p: Parameters) extends CoreBundle()(p) { val cfg = new PMPConfig val addr = UInt((paddrBits - PMP.lgAlign).W) def reset(): Unit = { cfg.a := 0.U cfg.l := 0.U } def readAddr = if (pmpGranularity.log2 == PMP.lgAlign) addr else { val mask = ((BigInt(1) << (pmpGranularity.log2 - PMP.lgAlign)) - 1).U Mux(napot, addr | (mask >> 1), ~(~addr | mask)) } def napot = cfg.a(1) def torNotNAPOT = cfg.a(0) def tor = !napot && torNotNAPOT def cfgLocked = cfg.l def addrLocked(next: PMPReg) = cfgLocked || next.cfgLocked && next.tor } class PMP(implicit p: Parameters) extends PMPReg { val mask = UInt(paddrBits.W) import PMP._ def computeMask = { val base = Cat(addr, cfg.a(0)) | ((pmpGranularity - 1).U >> lgAlign) Cat(base & ~(base + 1.U), ((1 << lgAlign) - 1).U) } private def comparand = ~(~(addr << lgAlign) | (pmpGranularity - 1).U) private def pow2Match(x: UInt, lgSize: UInt, lgMaxSize: Int) = { def eval(a: UInt, b: UInt, m: UInt) = ((a ^ b) & ~m) === 0.U if (lgMaxSize <= pmpGranularity.log2) { eval(x, comparand, mask) } else { // break up the circuit; the MSB part will be CSE'd val lsbMask = mask | UIntToOH1(lgSize, lgMaxSize) val msbMatch = eval(x >> lgMaxSize, comparand >> lgMaxSize, mask >> lgMaxSize) val lsbMatch = eval(x(lgMaxSize-1, 0), comparand(lgMaxSize-1, 0), lsbMask(lgMaxSize-1, 0)) msbMatch && lsbMatch } } private def boundMatch(x: UInt, lsbMask: UInt, lgMaxSize: Int) = { if (lgMaxSize <= pmpGranularity.log2) { x < comparand } else { // break up the circuit; the MSB part will be CSE'd val msbsLess = (x >> lgMaxSize) < (comparand >> lgMaxSize) val msbsEqual = ((x >> lgMaxSize) ^ (comparand >> lgMaxSize)) === 0.U val lsbsLess = (x(lgMaxSize-1, 0) | lsbMask) < comparand(lgMaxSize-1, 0) msbsLess || (msbsEqual && lsbsLess) } } private def lowerBoundMatch(x: UInt, lgSize: UInt, lgMaxSize: Int) = !boundMatch(x, UIntToOH1(lgSize, lgMaxSize), lgMaxSize) private def upperBoundMatch(x: UInt, lgMaxSize: Int) = boundMatch(x, 0.U, lgMaxSize) private def rangeMatch(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP) = prev.lowerBoundMatch(x, lgSize, lgMaxSize) && upperBoundMatch(x, lgMaxSize) private def pow2Homogeneous(x: UInt, pgLevel: UInt) = { val maskHomogeneous = pgLevelMap { idxBits => if (idxBits > paddrBits) false.B else mask(idxBits - 1) } (pgLevel) maskHomogeneous || (pgLevelMap { idxBits => ((x ^ comparand) >> idxBits) =/= 0.U } (pgLevel)) } private def pgLevelMap[T](f: Int => T) = (0 until pgLevels).map { i => f(pgIdxBits + (pgLevels - 1 - i) * pgLevelBits) } private def rangeHomogeneous(x: UInt, pgLevel: UInt, prev: PMP) = { val beginsAfterLower = !(x < prev.comparand) val beginsAfterUpper = !(x < comparand) val pgMask = pgLevelMap { idxBits => (((BigInt(1) << paddrBits) - (BigInt(1) << idxBits)) max 0).U } (pgLevel) val endsBeforeLower = (x & pgMask) < (prev.comparand & pgMask) val endsBeforeUpper = (x & pgMask) < (comparand & pgMask) endsBeforeLower || beginsAfterUpper || (beginsAfterLower && endsBeforeUpper) } // returns whether this PMP completely contains, or contains none of, a 
page def homogeneous(x: UInt, pgLevel: UInt, prev: PMP): Bool = Mux(napot, pow2Homogeneous(x, pgLevel), !torNotNAPOT || rangeHomogeneous(x, pgLevel, prev)) // returns whether this matching PMP fully contains the access def aligned(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP): Bool = if (lgMaxSize <= pmpGranularity.log2) true.B else { val lsbMask = UIntToOH1(lgSize, lgMaxSize) val straddlesLowerBound = ((x >> lgMaxSize) ^ (prev.comparand >> lgMaxSize)) === 0.U && (prev.comparand(lgMaxSize-1, 0) & ~x(lgMaxSize-1, 0)) =/= 0.U val straddlesUpperBound = ((x >> lgMaxSize) ^ (comparand >> lgMaxSize)) === 0.U && (comparand(lgMaxSize-1, 0) & (x(lgMaxSize-1, 0) | lsbMask)) =/= 0.U val rangeAligned = !(straddlesLowerBound || straddlesUpperBound) val pow2Aligned = (lsbMask & ~mask(lgMaxSize-1, 0)) === 0.U Mux(napot, pow2Aligned, rangeAligned) } // returns whether this PMP matches at least one byte of the access def hit(x: UInt, lgSize: UInt, lgMaxSize: Int, prev: PMP): Bool = Mux(napot, pow2Match(x, lgSize, lgMaxSize), torNotNAPOT && rangeMatch(x, lgSize, lgMaxSize, prev)) } class PMPHomogeneityChecker(pmps: Seq[PMP])(implicit p: Parameters) { def apply(addr: UInt, pgLevel: UInt): Bool = { pmps.foldLeft((true.B, 0.U.asTypeOf(new PMP))) { case ((h, prev), pmp) => (h && pmp.homogeneous(addr, pgLevel, prev), pmp) }._1 } } class PMPChecker(lgMaxSize: Int)(implicit val p: Parameters) extends Module with HasCoreParameters { override def desiredName = s"PMPChecker_s${lgMaxSize}" val io = IO(new Bundle { val prv = Input(UInt(PRV.SZ.W)) val pmp = Input(Vec(nPMPs, new PMP)) val addr = Input(UInt(paddrBits.W)) val size = Input(UInt(log2Ceil(lgMaxSize + 1).W)) val r = Output(Bool()) val w = Output(Bool()) val x = Output(Bool()) }) val default = if (io.pmp.isEmpty) true.B else io.prv > PRV.S.U val pmp0 = WireInit(0.U.asTypeOf(new PMP)) pmp0.cfg.r := default pmp0.cfg.w := default pmp0.cfg.x := default val res = (io.pmp zip (pmp0 +: io.pmp)).reverse.foldLeft(pmp0) { case (prev, (pmp, prevPMP)) => val hit = pmp.hit(io.addr, io.size, lgMaxSize, prevPMP) val ignore = default && !pmp.cfg.l val aligned = pmp.aligned(io.addr, io.size, lgMaxSize, prevPMP) for ((name, idx) <- Seq("no", "TOR", if (pmpGranularity <= 4) "NA4" else "", "NAPOT").zipWithIndex; if name.nonEmpty) property.cover(pmp.cfg.a === idx.U, s"The cfg access is set to ${name} access ", "Cover PMP access mode setting") property.cover(pmp.cfg.l === 0x1.U, s"The cfg lock is set to high ", "Cover PMP lock mode setting") // Not including Write and no Read permission as the combination is reserved for ((name, idx) <- Seq("no", "RO", "", "RW", "X", "RX", "", "RWX").zipWithIndex; if name.nonEmpty) property.cover((Cat(pmp.cfg.x, pmp.cfg.w, pmp.cfg.r) === idx.U), s"The permission is set to ${name} access ", "Cover PMP access permission setting") for ((name, idx) <- Seq("", "TOR", if (pmpGranularity <= 4) "NA4" else "", "NAPOT").zipWithIndex; if name.nonEmpty) { property.cover(!ignore && hit && aligned && pmp.cfg.a === idx.U, s"The access matches ${name} mode ", "Cover PMP access") property.cover(pmp.cfg.l && hit && aligned && pmp.cfg.a === idx.U, s"The access matches ${name} mode with lock bit high", "Cover PMP access with lock bit") } val cur = WireInit(pmp) cur.cfg.r := aligned && (pmp.cfg.r || ignore) cur.cfg.w := aligned && (pmp.cfg.w || ignore) cur.cfg.x := aligned && (pmp.cfg.x || ignore) Mux(hit, cur, prev) } io.r := res.cfg.r io.w := res.cfg.w io.x := res.cfg.x }
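Before the generated PMPChecker below, it may help to see the NAPOT mask that PMP.computeMask produces on concrete numbers. This is a worked example in plain Scala (not Chisel), assuming the minimum 4-byte PMP granularity so the granularity term in computeMask drops out (lgAlign is fixed at 2); the object name is illustrative.

// A NAPOT pmpaddr with k trailing ones covers a 2^(k + 3)-byte region; computeMask turns that
// encoding into a byte-address mask whose set bits are the "don't care" bits inside the region.
object NapotMaskExample extends App {
  val lgAlign = 2
  val addr = BigInt("100011", 2)               // pmpaddr with 2 trailing ones -> 32-byte region
  val base = (addr << 1) | 1                   // Cat(addr, cfg.a(0)), with cfg.a(0) = 1 for NAPOT
  val mask = ((base & ~(base + 1)) << lgAlign) | ((1 << lgAlign) - 1)
  println("mask = 0x" + mask.toString(16))     // prints 0x1f: the low 5 address bits lie inside the region
}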
module PMPChecker_s3_5( // @[PMP.scala:143:7] input clock, // @[PMP.scala:143:7] input reset, // @[PMP.scala:143:7] input [31:0] io_addr, // @[PMP.scala:146:14] input [1:0] io_size // @[PMP.scala:146:14] ); wire [31:0] io_addr_0 = io_addr; // @[PMP.scala:143:7] wire [1:0] io_size_0 = io_size; // @[PMP.scala:143:7] wire [28:0] _res_hit_msbMatch_T_8 = 29'h1FFFFFFF; // @[PMP.scala:63:54] wire [28:0] _res_hit_msbMatch_T_18 = 29'h1FFFFFFF; // @[PMP.scala:63:54] wire [28:0] _res_hit_msbMatch_T_28 = 29'h1FFFFFFF; // @[PMP.scala:63:54] wire [28:0] _res_hit_msbMatch_T_38 = 29'h1FFFFFFF; // @[PMP.scala:63:54] wire [28:0] _res_hit_msbMatch_T_48 = 29'h1FFFFFFF; // @[PMP.scala:63:54] wire [28:0] _res_hit_msbMatch_T_58 = 29'h1FFFFFFF; // @[PMP.scala:63:54] wire [28:0] _res_hit_msbMatch_T_68 = 29'h1FFFFFFF; // @[PMP.scala:63:54] wire [28:0] _res_hit_msbMatch_T_78 = 29'h1FFFFFFF; // @[PMP.scala:63:54] wire [28:0] _res_hit_msbMatch_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_6 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_11 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_12 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesLowerBound_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesUpperBound_T_5 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_15 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_16 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_17 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_19 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_23 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_26 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesLowerBound_T_22 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesUpperBound_T_22 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_25 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_26 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_29 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_33 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_35 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_40 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesLowerBound_T_39 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, 
:123:67, :124:62] wire [28:0] _res_aligned_straddlesUpperBound_T_39 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_35 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_36 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_41 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_47 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_47 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_54 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesLowerBound_T_56 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesUpperBound_T_56 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_45 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_46 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_53 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_61 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_59 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_68 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesLowerBound_T_73 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesUpperBound_T_73 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_55 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_56 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_65 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_75 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_71 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_82 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesLowerBound_T_90 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesUpperBound_T_90 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_65 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_66 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_77 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_89 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_83 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] 
_res_hit_msbsEqual_T_96 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesLowerBound_T_107 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesUpperBound_T_107 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_75 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbMatch_T_76 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_89 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_103 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsLess_T_95 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_hit_msbsEqual_T_110 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesLowerBound_T_124 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [28:0] _res_aligned_straddlesUpperBound_T_124 = 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [31:0] _res_hit_msbMatch_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_4 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_8 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_9 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_9 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_10 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_10 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_11 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_9 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_10 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_2 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_3 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_9 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_10 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_12 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_13 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_12 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_13 = 
32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_14 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_15 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_16 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_17 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_17 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_18 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_20 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_21 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_23 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_24 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_24 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_25 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_19 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_20 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_26 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_27 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_19 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_20 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_26 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_27 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_22 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_23 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_22 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_23 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_26 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_27 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_30 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_31 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_31 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_32 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_32 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_33 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_37 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_38 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_38 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_39 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_36 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_37 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_43 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_44 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_36 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_37 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] 
_res_aligned_straddlesUpperBound_T_43 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_44 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_32 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_33 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_32 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_33 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_38 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_39 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_44 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_45 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_45 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_46 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_44 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_45 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_51 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_52 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_52 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_54 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_60 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_61 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_54 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_60 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_61 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_42 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_43 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_42 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_43 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_50 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_51 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_58 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_59 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_59 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_60 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_56 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_57 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_65 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_66 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_66 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_67 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_70 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] 
_res_aligned_straddlesLowerBound_T_71 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_77 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_78 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_70 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_71 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_77 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_78 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_52 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_52 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_53 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_62 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_63 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_72 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_73 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_73 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_74 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_68 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_69 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_79 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_80 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_80 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_81 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_88 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_94 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_95 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_88 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_94 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_95 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_62 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_63 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_62 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_63 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_74 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_75 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_86 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_88 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_80 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_81 = 32'hFFFFFFFF; // 
@[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_93 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_94 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_94 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_95 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_104 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_105 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_111 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_112 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_104 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_105 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_111 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_112 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_72 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbMatch_T_73 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_72 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbMatch_T_73 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_86 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_87 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_100 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_101 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_101 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_102 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_92 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsLess_T_93 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_107 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_msbsEqual_T_108 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_108 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_hit_lsbsLess_T_109 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_121 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_122 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_128 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesLowerBound_T_129 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_121 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_122 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_128 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [31:0] _res_aligned_straddlesUpperBound_T_129 = 32'hFFFFFFFF; // @[PMP.scala:60:{29,48}] wire [2:0] _res_aligned_pow2Aligned_T_1 = 3'h7; // @[PMP.scala:126:34] wire [2:0] _res_aligned_pow2Aligned_T_4 = 3'h7; // @[PMP.scala:126:34] wire [2:0] _res_aligned_pow2Aligned_T_7 = 3'h7; // @[PMP.scala:126:34] wire [2:0] _res_aligned_pow2Aligned_T_10 = 3'h7; // @[PMP.scala:126:34] wire [2:0] _res_aligned_pow2Aligned_T_13 = 3'h7; // @[PMP.scala:126:34] wire [2:0] _res_aligned_pow2Aligned_T_16 = 3'h7; // @[PMP.scala:126:34] wire [2:0] 
_res_aligned_pow2Aligned_T_19 = 3'h7; // @[PMP.scala:126:34] wire [2:0] _res_aligned_pow2Aligned_T_22 = 3'h7; // @[PMP.scala:126:34] wire [2:0] _res_hit_lsbMatch_T_5 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_6 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_13 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_12 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_12 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_pow2Aligned_T = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_5 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_7 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_9 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_11 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_13 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbMatch_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_20 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_27 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_29 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_32 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_29 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_32 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_pow2Aligned_T_3 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_50 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_52 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_54 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_56 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_58 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, 
:123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_60 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbMatch_T_25 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_34 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_41 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_46 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_49 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_46 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_49 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_pow2Aligned_T_6 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_95 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_97 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_99 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_101 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_103 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_105 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbMatch_T_35 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_48 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_55 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_63 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_66 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_63 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_66 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_pow2Aligned_T_9 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_140 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_142 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_144 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_146 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_148 = 
3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_150 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbMatch_T_45 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_62 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_69 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_80 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_83 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_80 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_83 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_pow2Aligned_T_12 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_185 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_187 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_189 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_191 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_193 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_195 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbMatch_T_55 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_76 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_83 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_97 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_100 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_97 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_100 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_pow2Aligned_T_15 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_230 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_232 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_234 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_236 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, 
:124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_238 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_240 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbMatch_T_65 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_90 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_97 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_114 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_117 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_114 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_117 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_pow2Aligned_T_18 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_275 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_277 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_279 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_281 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_283 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_285 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbMatch_T_75 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_104 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_hit_lsbsLess_T_111 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_131 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesLowerBound_T_134 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_131 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_straddlesUpperBound_T_134 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_aligned_pow2Aligned_T_21 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_320 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_322 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_324 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_326 = 
3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_328 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire [2:0] _res_T_330 = 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire _res_hit_T_8 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_ignore_T = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_rangeAligned = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_6 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_17 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_26 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_35 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_hit_T_21 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_ignore_T_1 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_rangeAligned_1 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_1 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_45 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_51 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_62 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_71 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_80 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_hit_T_34 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_ignore_T_2 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_rangeAligned_2 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_2 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_90 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_96 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_107 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_116 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_125 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_hit_T_47 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_ignore_T_3 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_rangeAligned_3 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_3 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_135 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire 
_res_T_141 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_152 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_161 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_170 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_hit_T_60 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_ignore_T_4 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_rangeAligned_4 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_4 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_180 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_186 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_197 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_206 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_215 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_hit_T_73 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_ignore_T_5 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_rangeAligned_5 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_5 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_225 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_231 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_242 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_251 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_260 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_hit_T_86 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_ignore_T_6 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_rangeAligned_6 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_6 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_270 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_276 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_287 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_296 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_305 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_hit_T_99 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_ignore_T_7 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_rangeAligned_7 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire res_aligned_7 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_315 = 
1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_321 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_332 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_341 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire _res_T_350 = 1'h1; // @[PMP.scala:88:5, :125:24, :127:8, :164:29, :168:32, :174:60, :177:22] wire [31:0] io_pmp_0_mask = 32'h0; // @[PMP.scala:143:7] wire [31:0] io_pmp_1_mask = 32'h0; // @[PMP.scala:143:7] wire [31:0] io_pmp_2_mask = 32'h0; // @[PMP.scala:143:7] wire [31:0] io_pmp_3_mask = 32'h0; // @[PMP.scala:143:7] wire [31:0] io_pmp_4_mask = 32'h0; // @[PMP.scala:143:7] wire [31:0] io_pmp_5_mask = 32'h0; // @[PMP.scala:143:7] wire [31:0] io_pmp_6_mask = 32'h0; // @[PMP.scala:143:7] wire [31:0] io_pmp_7_mask = 32'h0; // @[PMP.scala:143:7] wire [31:0] _pmp0_WIRE_mask = 32'h0; // @[PMP.scala:157:35] wire [31:0] pmp0_mask = 32'h0; // @[PMP.scala:157:22] wire [31:0] _res_hit_msbMatch_T_1 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbMatch_T_4 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbMatch_T_1 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbMatch_T_4 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_1 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_4 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_1 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_4 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_2 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_5 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_7 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_10 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_8 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_11 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_9 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_12 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_1 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_4 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_8 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_11 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_1 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_4 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_8 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_11 = 32'h0; // @[PMP.scala:60:27] wire [31:0] res_cur_mask = 32'h0; // @[PMP.scala:181:23] wire [31:0] _res_T_44_mask = 32'h0; // @[PMP.scala:185:8] wire [31:0] _res_hit_msbMatch_T_11 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbMatch_T_14 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbMatch_T_11 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbMatch_T_14 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_13 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_16 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_15 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_18 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_16 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_19 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_19 = 32'h0; // 
@[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_22 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_22 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_25 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_23 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_26 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_18 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_21 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_25 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_28 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_18 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_21 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_25 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_28 = 32'h0; // @[PMP.scala:60:27] wire [31:0] res_cur_1_mask = 32'h0; // @[PMP.scala:181:23] wire [31:0] _res_T_89_mask = 32'h0; // @[PMP.scala:185:8] wire [31:0] _res_hit_msbMatch_T_21 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbMatch_T_24 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbMatch_T_21 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbMatch_T_24 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_25 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_28 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_29 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_32 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_30 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_33 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_31 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_34 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_36 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_39 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_37 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_40 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_35 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_38 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_42 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_45 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_35 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_38 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_42 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_45 = 32'h0; // @[PMP.scala:60:27] wire [31:0] res_cur_2_mask = 32'h0; // @[PMP.scala:181:23] wire [31:0] _res_T_134_mask = 32'h0; // @[PMP.scala:185:8] wire [31:0] _res_hit_msbMatch_T_31 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbMatch_T_34 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbMatch_T_31 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbMatch_T_34 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_37 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_40 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_43 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_46 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_44 = 32'h0; // @[PMP.scala:60:36] wire [31:0] 
_res_hit_lsbsLess_T_47 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_43 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_46 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_50 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_53 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_51 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_54 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_52 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_55 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_59 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_62 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_52 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_55 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_59 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_62 = 32'h0; // @[PMP.scala:60:27] wire [31:0] res_cur_3_mask = 32'h0; // @[PMP.scala:181:23] wire [31:0] _res_T_179_mask = 32'h0; // @[PMP.scala:185:8] wire [31:0] _res_hit_msbMatch_T_41 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbMatch_T_44 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbMatch_T_41 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbMatch_T_44 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_49 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_52 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_57 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_60 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_58 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_61 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_55 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_58 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_64 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_67 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_65 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_68 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_69 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_72 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_76 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_79 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_69 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_72 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_76 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_79 = 32'h0; // @[PMP.scala:60:27] wire [31:0] res_cur_4_mask = 32'h0; // @[PMP.scala:181:23] wire [31:0] _res_T_224_mask = 32'h0; // @[PMP.scala:185:8] wire [31:0] _res_hit_msbMatch_T_51 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbMatch_T_54 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbMatch_T_51 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbMatch_T_54 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_61 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_64 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_71 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_74 = 32'h0; // 
@[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_72 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_75 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_67 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_70 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_78 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_81 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_79 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_82 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_86 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_89 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_93 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_96 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_86 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_89 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_93 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_96 = 32'h0; // @[PMP.scala:60:27] wire [31:0] res_cur_5_mask = 32'h0; // @[PMP.scala:181:23] wire [31:0] _res_T_269_mask = 32'h0; // @[PMP.scala:185:8] wire [31:0] _res_hit_msbMatch_T_61 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbMatch_T_64 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbMatch_T_61 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbMatch_T_64 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_73 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_76 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_85 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_88 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_86 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_89 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_79 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_82 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_92 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_95 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_93 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_96 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_103 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_106 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_110 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_113 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_103 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_106 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_110 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_113 = 32'h0; // @[PMP.scala:60:27] wire [31:0] res_cur_6_mask = 32'h0; // @[PMP.scala:181:23] wire [31:0] _res_T_314_mask = 32'h0; // @[PMP.scala:185:8] wire [31:0] _res_hit_msbMatch_T_71 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbMatch_T_74 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbMatch_T_71 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbMatch_T_74 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_85 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_88 = 32'h0; // @[PMP.scala:60:27] wire 
[31:0] _res_hit_msbsEqual_T_99 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_102 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_100 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_103 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsLess_T_91 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsLess_T_94 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_msbsEqual_T_106 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_msbsEqual_T_109 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_hit_lsbsLess_T_107 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_hit_lsbsLess_T_110 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_120 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_123 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesLowerBound_T_127 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesLowerBound_T_130 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_120 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_123 = 32'h0; // @[PMP.scala:60:27] wire [31:0] _res_aligned_straddlesUpperBound_T_127 = 32'h0; // @[PMP.scala:60:36] wire [31:0] _res_aligned_straddlesUpperBound_T_130 = 32'h0; // @[PMP.scala:60:27] wire [31:0] res_cur_7_mask = 32'h0; // @[PMP.scala:181:23] wire [31:0] res_mask = 32'h0; // @[PMP.scala:185:8] wire [29:0] io_pmp_0_addr = 30'h0; // @[PMP.scala:143:7] wire [29:0] io_pmp_1_addr = 30'h0; // @[PMP.scala:143:7] wire [29:0] io_pmp_2_addr = 30'h0; // @[PMP.scala:143:7] wire [29:0] io_pmp_3_addr = 30'h0; // @[PMP.scala:143:7] wire [29:0] io_pmp_4_addr = 30'h0; // @[PMP.scala:143:7] wire [29:0] io_pmp_5_addr = 30'h0; // @[PMP.scala:143:7] wire [29:0] io_pmp_6_addr = 30'h0; // @[PMP.scala:143:7] wire [29:0] io_pmp_7_addr = 30'h0; // @[PMP.scala:143:7] wire [29:0] _pmp0_WIRE_addr = 30'h0; // @[PMP.scala:157:35] wire [29:0] pmp0_addr = 30'h0; // @[PMP.scala:157:22] wire [29:0] res_cur_addr = 30'h0; // @[PMP.scala:181:23] wire [29:0] _res_T_44_addr = 30'h0; // @[PMP.scala:185:8] wire [29:0] res_cur_1_addr = 30'h0; // @[PMP.scala:181:23] wire [29:0] _res_T_89_addr = 30'h0; // @[PMP.scala:185:8] wire [29:0] res_cur_2_addr = 30'h0; // @[PMP.scala:181:23] wire [29:0] _res_T_134_addr = 30'h0; // @[PMP.scala:185:8] wire [29:0] res_cur_3_addr = 30'h0; // @[PMP.scala:181:23] wire [29:0] _res_T_179_addr = 30'h0; // @[PMP.scala:185:8] wire [29:0] res_cur_4_addr = 30'h0; // @[PMP.scala:181:23] wire [29:0] _res_T_224_addr = 30'h0; // @[PMP.scala:185:8] wire [29:0] res_cur_5_addr = 30'h0; // @[PMP.scala:181:23] wire [29:0] _res_T_269_addr = 30'h0; // @[PMP.scala:185:8] wire [29:0] res_cur_6_addr = 30'h0; // @[PMP.scala:181:23] wire [29:0] _res_T_314_addr = 30'h0; // @[PMP.scala:185:8] wire [29:0] res_cur_7_addr = 30'h0; // @[PMP.scala:181:23] wire [29:0] res_addr = 30'h0; // @[PMP.scala:185:8] wire [1:0] io_pmp_0_cfg_res = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_0_cfg_a = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_1_cfg_res = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_1_cfg_a = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_2_cfg_res = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_2_cfg_a = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_3_cfg_res = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_3_cfg_a = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_4_cfg_res = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_4_cfg_a = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_5_cfg_res = 2'h0; // 
@[PMP.scala:143:7] wire [1:0] io_pmp_5_cfg_a = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_6_cfg_res = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_6_cfg_a = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_7_cfg_res = 2'h0; // @[PMP.scala:143:7] wire [1:0] io_pmp_7_cfg_a = 2'h0; // @[PMP.scala:143:7] wire [1:0] _pmp0_WIRE_cfg_res = 2'h0; // @[PMP.scala:157:35] wire [1:0] _pmp0_WIRE_cfg_a = 2'h0; // @[PMP.scala:157:35] wire [1:0] pmp0_cfg_res = 2'h0; // @[PMP.scala:157:22] wire [1:0] pmp0_cfg_a = 2'h0; // @[PMP.scala:157:22] wire [1:0] res_hi = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_1 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_2 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_3 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_4 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_5 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_cur_cfg_res = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cur_cfg_a = 2'h0; // @[PMP.scala:181:23] wire [1:0] _res_T_44_cfg_res = 2'h0; // @[PMP.scala:185:8] wire [1:0] _res_T_44_cfg_a = 2'h0; // @[PMP.scala:185:8] wire [1:0] res_hi_6 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_7 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_8 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_9 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_10 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_11 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_cur_1_cfg_res = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cur_1_cfg_a = 2'h0; // @[PMP.scala:181:23] wire [1:0] _res_T_89_cfg_res = 2'h0; // @[PMP.scala:185:8] wire [1:0] _res_T_89_cfg_a = 2'h0; // @[PMP.scala:185:8] wire [1:0] res_hi_12 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_13 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_14 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_15 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_16 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_17 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_cur_2_cfg_res = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cur_2_cfg_a = 2'h0; // @[PMP.scala:181:23] wire [1:0] _res_T_134_cfg_res = 2'h0; // @[PMP.scala:185:8] wire [1:0] _res_T_134_cfg_a = 2'h0; // @[PMP.scala:185:8] wire [1:0] res_hi_18 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_19 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_20 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_21 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_22 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_23 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_cur_3_cfg_res = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cur_3_cfg_a = 2'h0; // @[PMP.scala:181:23] wire [1:0] _res_T_179_cfg_res = 2'h0; // @[PMP.scala:185:8] wire [1:0] _res_T_179_cfg_a = 2'h0; // @[PMP.scala:185:8] wire [1:0] res_hi_24 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_25 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_26 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_27 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_28 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_29 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_cur_4_cfg_res = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cur_4_cfg_a = 2'h0; // @[PMP.scala:181:23] wire [1:0] _res_T_224_cfg_res = 2'h0; // @[PMP.scala:185:8] wire [1:0] _res_T_224_cfg_a = 2'h0; // @[PMP.scala:185:8] wire [1:0] res_hi_30 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_31 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_32 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_33 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_34 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_35 = 2'h0; // 
@[PMP.scala:174:26] wire [1:0] res_cur_5_cfg_res = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cur_5_cfg_a = 2'h0; // @[PMP.scala:181:23] wire [1:0] _res_T_269_cfg_res = 2'h0; // @[PMP.scala:185:8] wire [1:0] _res_T_269_cfg_a = 2'h0; // @[PMP.scala:185:8] wire [1:0] res_hi_36 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_37 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_38 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_39 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_40 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_41 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_cur_6_cfg_res = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cur_6_cfg_a = 2'h0; // @[PMP.scala:181:23] wire [1:0] _res_T_314_cfg_res = 2'h0; // @[PMP.scala:185:8] wire [1:0] _res_T_314_cfg_a = 2'h0; // @[PMP.scala:185:8] wire [1:0] res_hi_42 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_43 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_44 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_45 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_46 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_hi_47 = 2'h0; // @[PMP.scala:174:26] wire [1:0] res_cur_7_cfg_res = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cur_7_cfg_a = 2'h0; // @[PMP.scala:181:23] wire [1:0] res_cfg_res = 2'h0; // @[PMP.scala:185:8] wire [1:0] res_cfg_a = 2'h0; // @[PMP.scala:185:8] wire io_pmp_0_cfg_l = 1'h0; // @[PMP.scala:143:7] wire io_pmp_0_cfg_x = 1'h0; // @[PMP.scala:143:7] wire io_pmp_0_cfg_w = 1'h0; // @[PMP.scala:143:7] wire io_pmp_0_cfg_r = 1'h0; // @[PMP.scala:143:7] wire io_pmp_1_cfg_l = 1'h0; // @[PMP.scala:143:7] wire io_pmp_1_cfg_x = 1'h0; // @[PMP.scala:143:7] wire io_pmp_1_cfg_w = 1'h0; // @[PMP.scala:143:7] wire io_pmp_1_cfg_r = 1'h0; // @[PMP.scala:143:7] wire io_pmp_2_cfg_l = 1'h0; // @[PMP.scala:143:7] wire io_pmp_2_cfg_x = 1'h0; // @[PMP.scala:143:7] wire io_pmp_2_cfg_w = 1'h0; // @[PMP.scala:143:7] wire io_pmp_2_cfg_r = 1'h0; // @[PMP.scala:143:7] wire io_pmp_3_cfg_l = 1'h0; // @[PMP.scala:143:7] wire io_pmp_3_cfg_x = 1'h0; // @[PMP.scala:143:7] wire io_pmp_3_cfg_w = 1'h0; // @[PMP.scala:143:7] wire io_pmp_3_cfg_r = 1'h0; // @[PMP.scala:143:7] wire io_pmp_4_cfg_l = 1'h0; // @[PMP.scala:143:7] wire io_pmp_4_cfg_x = 1'h0; // @[PMP.scala:143:7] wire io_pmp_4_cfg_w = 1'h0; // @[PMP.scala:143:7] wire io_pmp_4_cfg_r = 1'h0; // @[PMP.scala:143:7] wire io_pmp_5_cfg_l = 1'h0; // @[PMP.scala:143:7] wire io_pmp_5_cfg_x = 1'h0; // @[PMP.scala:143:7] wire io_pmp_5_cfg_w = 1'h0; // @[PMP.scala:143:7] wire io_pmp_5_cfg_r = 1'h0; // @[PMP.scala:143:7] wire io_pmp_6_cfg_l = 1'h0; // @[PMP.scala:143:7] wire io_pmp_6_cfg_x = 1'h0; // @[PMP.scala:143:7] wire io_pmp_6_cfg_w = 1'h0; // @[PMP.scala:143:7] wire io_pmp_6_cfg_r = 1'h0; // @[PMP.scala:143:7] wire io_pmp_7_cfg_l = 1'h0; // @[PMP.scala:143:7] wire io_pmp_7_cfg_x = 1'h0; // @[PMP.scala:143:7] wire io_pmp_7_cfg_w = 1'h0; // @[PMP.scala:143:7] wire io_pmp_7_cfg_r = 1'h0; // @[PMP.scala:143:7] wire io_r = 1'h0; // @[PMP.scala:143:7] wire io_w = 1'h0; // @[PMP.scala:143:7] wire io_x = 1'h0; // @[PMP.scala:143:7] wire default_0 = 1'h0; // @[PMP.scala:156:56] wire _pmp0_WIRE_cfg_l = 1'h0; // @[PMP.scala:157:35] wire _pmp0_WIRE_cfg_x = 1'h0; // @[PMP.scala:157:35] wire _pmp0_WIRE_cfg_w = 1'h0; // @[PMP.scala:157:35] wire _pmp0_WIRE_cfg_r = 1'h0; // @[PMP.scala:157:35] wire pmp0_cfg_l = 1'h0; // @[PMP.scala:157:22] wire pmp0_cfg_x = 1'h0; // @[PMP.scala:157:22] wire pmp0_cfg_w = 1'h0; // @[PMP.scala:157:22] wire pmp0_cfg_r = 1'h0; // @[PMP.scala:157:22] wire _res_hit_T = 1'h0; // @[PMP.scala:45:20] wire 
_res_hit_T_2 = 1'h0; // @[PMP.scala:46:26] wire res_hit_msbsLess = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_6 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_7 = 1'h0; // @[PMP.scala:83:16] wire res_hit_msbsLess_1 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_1 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_9 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_10 = 1'h0; // @[PMP.scala:83:16] wire _res_hit_T_11 = 1'h0; // @[PMP.scala:94:48] wire _res_hit_T_12 = 1'h0; // @[PMP.scala:132:61] wire res_hit = 1'h0; // @[PMP.scala:132:8] wire res_ignore = 1'h0; // @[PMP.scala:164:26] wire _res_aligned_straddlesLowerBound_T_16 = 1'h0; // @[PMP.scala:123:147] wire res_aligned_straddlesLowerBound = 1'h0; // @[PMP.scala:123:90] wire _res_aligned_straddlesUpperBound_T_16 = 1'h0; // @[PMP.scala:124:148] wire res_aligned_straddlesUpperBound = 1'h0; // @[PMP.scala:124:85] wire _res_aligned_rangeAligned_T = 1'h0; // @[PMP.scala:125:46] wire _res_aligned_T = 1'h0; // @[PMP.scala:45:20] wire _res_T_1 = 1'h0; // @[PMP.scala:168:32] wire _res_T_2 = 1'h0; // @[PMP.scala:168:32] wire _res_T_3 = 1'h0; // @[PMP.scala:168:32] wire _res_T_4 = 1'h0; // @[PMP.scala:170:30] wire _res_T_8 = 1'h0; // @[PMP.scala:174:60] wire _res_T_10 = 1'h0; // @[PMP.scala:174:60] wire _res_T_12 = 1'h0; // @[PMP.scala:174:60] wire _res_T_14 = 1'h0; // @[PMP.scala:174:60] wire _res_T_16 = 1'h0; // @[PMP.scala:174:60] wire _res_T_18 = 1'h0; // @[PMP.scala:177:30] wire _res_T_19 = 1'h0; // @[PMP.scala:177:37] wire _res_T_20 = 1'h0; // @[PMP.scala:177:61] wire _res_T_21 = 1'h0; // @[PMP.scala:177:48] wire _res_T_22 = 1'h0; // @[PMP.scala:178:32] wire _res_T_23 = 1'h0; // @[PMP.scala:178:39] wire _res_T_24 = 1'h0; // @[PMP.scala:178:63] wire _res_T_25 = 1'h0; // @[PMP.scala:178:50] wire _res_T_27 = 1'h0; // @[PMP.scala:177:30] wire _res_T_28 = 1'h0; // @[PMP.scala:177:37] wire _res_T_29 = 1'h0; // @[PMP.scala:177:61] wire _res_T_30 = 1'h0; // @[PMP.scala:177:48] wire _res_T_31 = 1'h0; // @[PMP.scala:178:32] wire _res_T_32 = 1'h0; // @[PMP.scala:178:39] wire _res_T_33 = 1'h0; // @[PMP.scala:178:63] wire _res_T_34 = 1'h0; // @[PMP.scala:178:50] wire _res_T_36 = 1'h0; // @[PMP.scala:177:30] wire _res_T_37 = 1'h0; // @[PMP.scala:177:37] wire _res_T_38 = 1'h0; // @[PMP.scala:177:61] wire _res_T_39 = 1'h0; // @[PMP.scala:177:48] wire _res_T_40 = 1'h0; // @[PMP.scala:178:32] wire _res_T_41 = 1'h0; // @[PMP.scala:178:39] wire _res_T_42 = 1'h0; // @[PMP.scala:178:63] wire _res_T_43 = 1'h0; // @[PMP.scala:178:50] wire res_cur_cfg_l = 1'h0; // @[PMP.scala:181:23] wire res_cur_cfg_x = 1'h0; // @[PMP.scala:181:23] wire res_cur_cfg_w = 1'h0; // @[PMP.scala:181:23] wire res_cur_cfg_r = 1'h0; // @[PMP.scala:181:23] wire _res_cur_cfg_r_T = 1'h0; // @[PMP.scala:182:40] wire _res_cur_cfg_r_T_1 = 1'h0; // @[PMP.scala:182:26] wire _res_cur_cfg_w_T = 1'h0; // @[PMP.scala:183:40] wire _res_cur_cfg_w_T_1 = 1'h0; // @[PMP.scala:183:26] wire _res_cur_cfg_x_T = 1'h0; // @[PMP.scala:184:40] wire _res_cur_cfg_x_T_1 = 1'h0; // @[PMP.scala:184:26] wire _res_T_44_cfg_l = 1'h0; // @[PMP.scala:185:8] wire _res_T_44_cfg_x = 1'h0; // @[PMP.scala:185:8] wire _res_T_44_cfg_w = 1'h0; // @[PMP.scala:185:8] wire _res_T_44_cfg_r = 1'h0; // @[PMP.scala:185:8] wire _res_hit_T_13 = 1'h0; // @[PMP.scala:45:20] wire _res_hit_T_15 = 1'h0; // @[PMP.scala:46:26] wire res_hit_msbsLess_2 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_2 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_19 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_20 = 
1'h0; // @[PMP.scala:83:16] wire res_hit_msbsLess_3 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_3 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_22 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_23 = 1'h0; // @[PMP.scala:83:16] wire _res_hit_T_24 = 1'h0; // @[PMP.scala:94:48] wire _res_hit_T_25 = 1'h0; // @[PMP.scala:132:61] wire res_hit_1 = 1'h0; // @[PMP.scala:132:8] wire res_ignore_1 = 1'h0; // @[PMP.scala:164:26] wire _res_aligned_straddlesLowerBound_T_33 = 1'h0; // @[PMP.scala:123:147] wire res_aligned_straddlesLowerBound_1 = 1'h0; // @[PMP.scala:123:90] wire _res_aligned_straddlesUpperBound_T_33 = 1'h0; // @[PMP.scala:124:148] wire res_aligned_straddlesUpperBound_1 = 1'h0; // @[PMP.scala:124:85] wire _res_aligned_rangeAligned_T_1 = 1'h0; // @[PMP.scala:125:46] wire _res_aligned_T_1 = 1'h0; // @[PMP.scala:45:20] wire _res_T_46 = 1'h0; // @[PMP.scala:168:32] wire _res_T_47 = 1'h0; // @[PMP.scala:168:32] wire _res_T_48 = 1'h0; // @[PMP.scala:168:32] wire _res_T_49 = 1'h0; // @[PMP.scala:170:30] wire _res_T_53 = 1'h0; // @[PMP.scala:174:60] wire _res_T_55 = 1'h0; // @[PMP.scala:174:60] wire _res_T_57 = 1'h0; // @[PMP.scala:174:60] wire _res_T_59 = 1'h0; // @[PMP.scala:174:60] wire _res_T_61 = 1'h0; // @[PMP.scala:174:60] wire _res_T_63 = 1'h0; // @[PMP.scala:177:30] wire _res_T_64 = 1'h0; // @[PMP.scala:177:37] wire _res_T_65 = 1'h0; // @[PMP.scala:177:61] wire _res_T_66 = 1'h0; // @[PMP.scala:177:48] wire _res_T_67 = 1'h0; // @[PMP.scala:178:32] wire _res_T_68 = 1'h0; // @[PMP.scala:178:39] wire _res_T_69 = 1'h0; // @[PMP.scala:178:63] wire _res_T_70 = 1'h0; // @[PMP.scala:178:50] wire _res_T_72 = 1'h0; // @[PMP.scala:177:30] wire _res_T_73 = 1'h0; // @[PMP.scala:177:37] wire _res_T_74 = 1'h0; // @[PMP.scala:177:61] wire _res_T_75 = 1'h0; // @[PMP.scala:177:48] wire _res_T_76 = 1'h0; // @[PMP.scala:178:32] wire _res_T_77 = 1'h0; // @[PMP.scala:178:39] wire _res_T_78 = 1'h0; // @[PMP.scala:178:63] wire _res_T_79 = 1'h0; // @[PMP.scala:178:50] wire _res_T_81 = 1'h0; // @[PMP.scala:177:30] wire _res_T_82 = 1'h0; // @[PMP.scala:177:37] wire _res_T_83 = 1'h0; // @[PMP.scala:177:61] wire _res_T_84 = 1'h0; // @[PMP.scala:177:48] wire _res_T_85 = 1'h0; // @[PMP.scala:178:32] wire _res_T_86 = 1'h0; // @[PMP.scala:178:39] wire _res_T_87 = 1'h0; // @[PMP.scala:178:63] wire _res_T_88 = 1'h0; // @[PMP.scala:178:50] wire res_cur_1_cfg_l = 1'h0; // @[PMP.scala:181:23] wire res_cur_1_cfg_x = 1'h0; // @[PMP.scala:181:23] wire res_cur_1_cfg_w = 1'h0; // @[PMP.scala:181:23] wire res_cur_1_cfg_r = 1'h0; // @[PMP.scala:181:23] wire _res_cur_cfg_r_T_2 = 1'h0; // @[PMP.scala:182:40] wire _res_cur_cfg_r_T_3 = 1'h0; // @[PMP.scala:182:26] wire _res_cur_cfg_w_T_2 = 1'h0; // @[PMP.scala:183:40] wire _res_cur_cfg_w_T_3 = 1'h0; // @[PMP.scala:183:26] wire _res_cur_cfg_x_T_2 = 1'h0; // @[PMP.scala:184:40] wire _res_cur_cfg_x_T_3 = 1'h0; // @[PMP.scala:184:26] wire _res_T_89_cfg_l = 1'h0; // @[PMP.scala:185:8] wire _res_T_89_cfg_x = 1'h0; // @[PMP.scala:185:8] wire _res_T_89_cfg_w = 1'h0; // @[PMP.scala:185:8] wire _res_T_89_cfg_r = 1'h0; // @[PMP.scala:185:8] wire _res_hit_T_26 = 1'h0; // @[PMP.scala:45:20] wire _res_hit_T_28 = 1'h0; // @[PMP.scala:46:26] wire res_hit_msbsLess_4 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_4 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_32 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_33 = 1'h0; // @[PMP.scala:83:16] wire res_hit_msbsLess_5 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_5 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_35 = 1'h0; // 
@[PMP.scala:83:30] wire _res_hit_T_36 = 1'h0; // @[PMP.scala:83:16] wire _res_hit_T_37 = 1'h0; // @[PMP.scala:94:48] wire _res_hit_T_38 = 1'h0; // @[PMP.scala:132:61] wire res_hit_2 = 1'h0; // @[PMP.scala:132:8] wire res_ignore_2 = 1'h0; // @[PMP.scala:164:26] wire _res_aligned_straddlesLowerBound_T_50 = 1'h0; // @[PMP.scala:123:147] wire res_aligned_straddlesLowerBound_2 = 1'h0; // @[PMP.scala:123:90] wire _res_aligned_straddlesUpperBound_T_50 = 1'h0; // @[PMP.scala:124:148] wire res_aligned_straddlesUpperBound_2 = 1'h0; // @[PMP.scala:124:85] wire _res_aligned_rangeAligned_T_2 = 1'h0; // @[PMP.scala:125:46] wire _res_aligned_T_2 = 1'h0; // @[PMP.scala:45:20] wire _res_T_91 = 1'h0; // @[PMP.scala:168:32] wire _res_T_92 = 1'h0; // @[PMP.scala:168:32] wire _res_T_93 = 1'h0; // @[PMP.scala:168:32] wire _res_T_94 = 1'h0; // @[PMP.scala:170:30] wire _res_T_98 = 1'h0; // @[PMP.scala:174:60] wire _res_T_100 = 1'h0; // @[PMP.scala:174:60] wire _res_T_102 = 1'h0; // @[PMP.scala:174:60] wire _res_T_104 = 1'h0; // @[PMP.scala:174:60] wire _res_T_106 = 1'h0; // @[PMP.scala:174:60] wire _res_T_108 = 1'h0; // @[PMP.scala:177:30] wire _res_T_109 = 1'h0; // @[PMP.scala:177:37] wire _res_T_110 = 1'h0; // @[PMP.scala:177:61] wire _res_T_111 = 1'h0; // @[PMP.scala:177:48] wire _res_T_112 = 1'h0; // @[PMP.scala:178:32] wire _res_T_113 = 1'h0; // @[PMP.scala:178:39] wire _res_T_114 = 1'h0; // @[PMP.scala:178:63] wire _res_T_115 = 1'h0; // @[PMP.scala:178:50] wire _res_T_117 = 1'h0; // @[PMP.scala:177:30] wire _res_T_118 = 1'h0; // @[PMP.scala:177:37] wire _res_T_119 = 1'h0; // @[PMP.scala:177:61] wire _res_T_120 = 1'h0; // @[PMP.scala:177:48] wire _res_T_121 = 1'h0; // @[PMP.scala:178:32] wire _res_T_122 = 1'h0; // @[PMP.scala:178:39] wire _res_T_123 = 1'h0; // @[PMP.scala:178:63] wire _res_T_124 = 1'h0; // @[PMP.scala:178:50] wire _res_T_126 = 1'h0; // @[PMP.scala:177:30] wire _res_T_127 = 1'h0; // @[PMP.scala:177:37] wire _res_T_128 = 1'h0; // @[PMP.scala:177:61] wire _res_T_129 = 1'h0; // @[PMP.scala:177:48] wire _res_T_130 = 1'h0; // @[PMP.scala:178:32] wire _res_T_131 = 1'h0; // @[PMP.scala:178:39] wire _res_T_132 = 1'h0; // @[PMP.scala:178:63] wire _res_T_133 = 1'h0; // @[PMP.scala:178:50] wire res_cur_2_cfg_l = 1'h0; // @[PMP.scala:181:23] wire res_cur_2_cfg_x = 1'h0; // @[PMP.scala:181:23] wire res_cur_2_cfg_w = 1'h0; // @[PMP.scala:181:23] wire res_cur_2_cfg_r = 1'h0; // @[PMP.scala:181:23] wire _res_cur_cfg_r_T_4 = 1'h0; // @[PMP.scala:182:40] wire _res_cur_cfg_r_T_5 = 1'h0; // @[PMP.scala:182:26] wire _res_cur_cfg_w_T_4 = 1'h0; // @[PMP.scala:183:40] wire _res_cur_cfg_w_T_5 = 1'h0; // @[PMP.scala:183:26] wire _res_cur_cfg_x_T_4 = 1'h0; // @[PMP.scala:184:40] wire _res_cur_cfg_x_T_5 = 1'h0; // @[PMP.scala:184:26] wire _res_T_134_cfg_l = 1'h0; // @[PMP.scala:185:8] wire _res_T_134_cfg_x = 1'h0; // @[PMP.scala:185:8] wire _res_T_134_cfg_w = 1'h0; // @[PMP.scala:185:8] wire _res_T_134_cfg_r = 1'h0; // @[PMP.scala:185:8] wire _res_hit_T_39 = 1'h0; // @[PMP.scala:45:20] wire _res_hit_T_41 = 1'h0; // @[PMP.scala:46:26] wire res_hit_msbsLess_6 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_6 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_45 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_46 = 1'h0; // @[PMP.scala:83:16] wire res_hit_msbsLess_7 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_7 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_48 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_49 = 1'h0; // @[PMP.scala:83:16] wire _res_hit_T_50 = 1'h0; // @[PMP.scala:94:48] wire _res_hit_T_51 = 1'h0; // 
@[PMP.scala:132:61] wire res_hit_3 = 1'h0; // @[PMP.scala:132:8] wire res_ignore_3 = 1'h0; // @[PMP.scala:164:26] wire _res_aligned_straddlesLowerBound_T_67 = 1'h0; // @[PMP.scala:123:147] wire res_aligned_straddlesLowerBound_3 = 1'h0; // @[PMP.scala:123:90] wire _res_aligned_straddlesUpperBound_T_67 = 1'h0; // @[PMP.scala:124:148] wire res_aligned_straddlesUpperBound_3 = 1'h0; // @[PMP.scala:124:85] wire _res_aligned_rangeAligned_T_3 = 1'h0; // @[PMP.scala:125:46] wire _res_aligned_T_3 = 1'h0; // @[PMP.scala:45:20] wire _res_T_136 = 1'h0; // @[PMP.scala:168:32] wire _res_T_137 = 1'h0; // @[PMP.scala:168:32] wire _res_T_138 = 1'h0; // @[PMP.scala:168:32] wire _res_T_139 = 1'h0; // @[PMP.scala:170:30] wire _res_T_143 = 1'h0; // @[PMP.scala:174:60] wire _res_T_145 = 1'h0; // @[PMP.scala:174:60] wire _res_T_147 = 1'h0; // @[PMP.scala:174:60] wire _res_T_149 = 1'h0; // @[PMP.scala:174:60] wire _res_T_151 = 1'h0; // @[PMP.scala:174:60] wire _res_T_153 = 1'h0; // @[PMP.scala:177:30] wire _res_T_154 = 1'h0; // @[PMP.scala:177:37] wire _res_T_155 = 1'h0; // @[PMP.scala:177:61] wire _res_T_156 = 1'h0; // @[PMP.scala:177:48] wire _res_T_157 = 1'h0; // @[PMP.scala:178:32] wire _res_T_158 = 1'h0; // @[PMP.scala:178:39] wire _res_T_159 = 1'h0; // @[PMP.scala:178:63] wire _res_T_160 = 1'h0; // @[PMP.scala:178:50] wire _res_T_162 = 1'h0; // @[PMP.scala:177:30] wire _res_T_163 = 1'h0; // @[PMP.scala:177:37] wire _res_T_164 = 1'h0; // @[PMP.scala:177:61] wire _res_T_165 = 1'h0; // @[PMP.scala:177:48] wire _res_T_166 = 1'h0; // @[PMP.scala:178:32] wire _res_T_167 = 1'h0; // @[PMP.scala:178:39] wire _res_T_168 = 1'h0; // @[PMP.scala:178:63] wire _res_T_169 = 1'h0; // @[PMP.scala:178:50] wire _res_T_171 = 1'h0; // @[PMP.scala:177:30] wire _res_T_172 = 1'h0; // @[PMP.scala:177:37] wire _res_T_173 = 1'h0; // @[PMP.scala:177:61] wire _res_T_174 = 1'h0; // @[PMP.scala:177:48] wire _res_T_175 = 1'h0; // @[PMP.scala:178:32] wire _res_T_176 = 1'h0; // @[PMP.scala:178:39] wire _res_T_177 = 1'h0; // @[PMP.scala:178:63] wire _res_T_178 = 1'h0; // @[PMP.scala:178:50] wire res_cur_3_cfg_l = 1'h0; // @[PMP.scala:181:23] wire res_cur_3_cfg_x = 1'h0; // @[PMP.scala:181:23] wire res_cur_3_cfg_w = 1'h0; // @[PMP.scala:181:23] wire res_cur_3_cfg_r = 1'h0; // @[PMP.scala:181:23] wire _res_cur_cfg_r_T_6 = 1'h0; // @[PMP.scala:182:40] wire _res_cur_cfg_r_T_7 = 1'h0; // @[PMP.scala:182:26] wire _res_cur_cfg_w_T_6 = 1'h0; // @[PMP.scala:183:40] wire _res_cur_cfg_w_T_7 = 1'h0; // @[PMP.scala:183:26] wire _res_cur_cfg_x_T_6 = 1'h0; // @[PMP.scala:184:40] wire _res_cur_cfg_x_T_7 = 1'h0; // @[PMP.scala:184:26] wire _res_T_179_cfg_l = 1'h0; // @[PMP.scala:185:8] wire _res_T_179_cfg_x = 1'h0; // @[PMP.scala:185:8] wire _res_T_179_cfg_w = 1'h0; // @[PMP.scala:185:8] wire _res_T_179_cfg_r = 1'h0; // @[PMP.scala:185:8] wire _res_hit_T_52 = 1'h0; // @[PMP.scala:45:20] wire _res_hit_T_54 = 1'h0; // @[PMP.scala:46:26] wire res_hit_msbsLess_8 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_8 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_58 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_59 = 1'h0; // @[PMP.scala:83:16] wire res_hit_msbsLess_9 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_9 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_61 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_62 = 1'h0; // @[PMP.scala:83:16] wire _res_hit_T_63 = 1'h0; // @[PMP.scala:94:48] wire _res_hit_T_64 = 1'h0; // @[PMP.scala:132:61] wire res_hit_4 = 1'h0; // @[PMP.scala:132:8] wire res_ignore_4 = 1'h0; // @[PMP.scala:164:26] wire 
_res_aligned_straddlesLowerBound_T_84 = 1'h0; // @[PMP.scala:123:147] wire res_aligned_straddlesLowerBound_4 = 1'h0; // @[PMP.scala:123:90] wire _res_aligned_straddlesUpperBound_T_84 = 1'h0; // @[PMP.scala:124:148] wire res_aligned_straddlesUpperBound_4 = 1'h0; // @[PMP.scala:124:85] wire _res_aligned_rangeAligned_T_4 = 1'h0; // @[PMP.scala:125:46] wire _res_aligned_T_4 = 1'h0; // @[PMP.scala:45:20] wire _res_T_181 = 1'h0; // @[PMP.scala:168:32] wire _res_T_182 = 1'h0; // @[PMP.scala:168:32] wire _res_T_183 = 1'h0; // @[PMP.scala:168:32] wire _res_T_184 = 1'h0; // @[PMP.scala:170:30] wire _res_T_188 = 1'h0; // @[PMP.scala:174:60] wire _res_T_190 = 1'h0; // @[PMP.scala:174:60] wire _res_T_192 = 1'h0; // @[PMP.scala:174:60] wire _res_T_194 = 1'h0; // @[PMP.scala:174:60] wire _res_T_196 = 1'h0; // @[PMP.scala:174:60] wire _res_T_198 = 1'h0; // @[PMP.scala:177:30] wire _res_T_199 = 1'h0; // @[PMP.scala:177:37] wire _res_T_200 = 1'h0; // @[PMP.scala:177:61] wire _res_T_201 = 1'h0; // @[PMP.scala:177:48] wire _res_T_202 = 1'h0; // @[PMP.scala:178:32] wire _res_T_203 = 1'h0; // @[PMP.scala:178:39] wire _res_T_204 = 1'h0; // @[PMP.scala:178:63] wire _res_T_205 = 1'h0; // @[PMP.scala:178:50] wire _res_T_207 = 1'h0; // @[PMP.scala:177:30] wire _res_T_208 = 1'h0; // @[PMP.scala:177:37] wire _res_T_209 = 1'h0; // @[PMP.scala:177:61] wire _res_T_210 = 1'h0; // @[PMP.scala:177:48] wire _res_T_211 = 1'h0; // @[PMP.scala:178:32] wire _res_T_212 = 1'h0; // @[PMP.scala:178:39] wire _res_T_213 = 1'h0; // @[PMP.scala:178:63] wire _res_T_214 = 1'h0; // @[PMP.scala:178:50] wire _res_T_216 = 1'h0; // @[PMP.scala:177:30] wire _res_T_217 = 1'h0; // @[PMP.scala:177:37] wire _res_T_218 = 1'h0; // @[PMP.scala:177:61] wire _res_T_219 = 1'h0; // @[PMP.scala:177:48] wire _res_T_220 = 1'h0; // @[PMP.scala:178:32] wire _res_T_221 = 1'h0; // @[PMP.scala:178:39] wire _res_T_222 = 1'h0; // @[PMP.scala:178:63] wire _res_T_223 = 1'h0; // @[PMP.scala:178:50] wire res_cur_4_cfg_l = 1'h0; // @[PMP.scala:181:23] wire res_cur_4_cfg_x = 1'h0; // @[PMP.scala:181:23] wire res_cur_4_cfg_w = 1'h0; // @[PMP.scala:181:23] wire res_cur_4_cfg_r = 1'h0; // @[PMP.scala:181:23] wire _res_cur_cfg_r_T_8 = 1'h0; // @[PMP.scala:182:40] wire _res_cur_cfg_r_T_9 = 1'h0; // @[PMP.scala:182:26] wire _res_cur_cfg_w_T_8 = 1'h0; // @[PMP.scala:183:40] wire _res_cur_cfg_w_T_9 = 1'h0; // @[PMP.scala:183:26] wire _res_cur_cfg_x_T_8 = 1'h0; // @[PMP.scala:184:40] wire _res_cur_cfg_x_T_9 = 1'h0; // @[PMP.scala:184:26] wire _res_T_224_cfg_l = 1'h0; // @[PMP.scala:185:8] wire _res_T_224_cfg_x = 1'h0; // @[PMP.scala:185:8] wire _res_T_224_cfg_w = 1'h0; // @[PMP.scala:185:8] wire _res_T_224_cfg_r = 1'h0; // @[PMP.scala:185:8] wire _res_hit_T_65 = 1'h0; // @[PMP.scala:45:20] wire _res_hit_T_67 = 1'h0; // @[PMP.scala:46:26] wire res_hit_msbsLess_10 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_10 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_71 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_72 = 1'h0; // @[PMP.scala:83:16] wire res_hit_msbsLess_11 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_11 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_74 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_75 = 1'h0; // @[PMP.scala:83:16] wire _res_hit_T_76 = 1'h0; // @[PMP.scala:94:48] wire _res_hit_T_77 = 1'h0; // @[PMP.scala:132:61] wire res_hit_5 = 1'h0; // @[PMP.scala:132:8] wire res_ignore_5 = 1'h0; // @[PMP.scala:164:26] wire _res_aligned_straddlesLowerBound_T_101 = 1'h0; // @[PMP.scala:123:147] wire res_aligned_straddlesLowerBound_5 = 1'h0; // @[PMP.scala:123:90] 
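// All eight PMP entries appear to be disabled in this configuration
// (io_pmp_*_cfg_a == 2'h0), so the per-entry hit, ignore and result wires in
// this region are constant-propagated to zero by the Chisel/FIRRTL emitter.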
wire _res_aligned_straddlesUpperBound_T_101 = 1'h0; // @[PMP.scala:124:148] wire res_aligned_straddlesUpperBound_5 = 1'h0; // @[PMP.scala:124:85] wire _res_aligned_rangeAligned_T_5 = 1'h0; // @[PMP.scala:125:46] wire _res_aligned_T_5 = 1'h0; // @[PMP.scala:45:20] wire _res_T_226 = 1'h0; // @[PMP.scala:168:32] wire _res_T_227 = 1'h0; // @[PMP.scala:168:32] wire _res_T_228 = 1'h0; // @[PMP.scala:168:32] wire _res_T_229 = 1'h0; // @[PMP.scala:170:30] wire _res_T_233 = 1'h0; // @[PMP.scala:174:60] wire _res_T_235 = 1'h0; // @[PMP.scala:174:60] wire _res_T_237 = 1'h0; // @[PMP.scala:174:60] wire _res_T_239 = 1'h0; // @[PMP.scala:174:60] wire _res_T_241 = 1'h0; // @[PMP.scala:174:60] wire _res_T_243 = 1'h0; // @[PMP.scala:177:30] wire _res_T_244 = 1'h0; // @[PMP.scala:177:37] wire _res_T_245 = 1'h0; // @[PMP.scala:177:61] wire _res_T_246 = 1'h0; // @[PMP.scala:177:48] wire _res_T_247 = 1'h0; // @[PMP.scala:178:32] wire _res_T_248 = 1'h0; // @[PMP.scala:178:39] wire _res_T_249 = 1'h0; // @[PMP.scala:178:63] wire _res_T_250 = 1'h0; // @[PMP.scala:178:50] wire _res_T_252 = 1'h0; // @[PMP.scala:177:30] wire _res_T_253 = 1'h0; // @[PMP.scala:177:37] wire _res_T_254 = 1'h0; // @[PMP.scala:177:61] wire _res_T_255 = 1'h0; // @[PMP.scala:177:48] wire _res_T_256 = 1'h0; // @[PMP.scala:178:32] wire _res_T_257 = 1'h0; // @[PMP.scala:178:39] wire _res_T_258 = 1'h0; // @[PMP.scala:178:63] wire _res_T_259 = 1'h0; // @[PMP.scala:178:50] wire _res_T_261 = 1'h0; // @[PMP.scala:177:30] wire _res_T_262 = 1'h0; // @[PMP.scala:177:37] wire _res_T_263 = 1'h0; // @[PMP.scala:177:61] wire _res_T_264 = 1'h0; // @[PMP.scala:177:48] wire _res_T_265 = 1'h0; // @[PMP.scala:178:32] wire _res_T_266 = 1'h0; // @[PMP.scala:178:39] wire _res_T_267 = 1'h0; // @[PMP.scala:178:63] wire _res_T_268 = 1'h0; // @[PMP.scala:178:50] wire res_cur_5_cfg_l = 1'h0; // @[PMP.scala:181:23] wire res_cur_5_cfg_x = 1'h0; // @[PMP.scala:181:23] wire res_cur_5_cfg_w = 1'h0; // @[PMP.scala:181:23] wire res_cur_5_cfg_r = 1'h0; // @[PMP.scala:181:23] wire _res_cur_cfg_r_T_10 = 1'h0; // @[PMP.scala:182:40] wire _res_cur_cfg_r_T_11 = 1'h0; // @[PMP.scala:182:26] wire _res_cur_cfg_w_T_10 = 1'h0; // @[PMP.scala:183:40] wire _res_cur_cfg_w_T_11 = 1'h0; // @[PMP.scala:183:26] wire _res_cur_cfg_x_T_10 = 1'h0; // @[PMP.scala:184:40] wire _res_cur_cfg_x_T_11 = 1'h0; // @[PMP.scala:184:26] wire _res_T_269_cfg_l = 1'h0; // @[PMP.scala:185:8] wire _res_T_269_cfg_x = 1'h0; // @[PMP.scala:185:8] wire _res_T_269_cfg_w = 1'h0; // @[PMP.scala:185:8] wire _res_T_269_cfg_r = 1'h0; // @[PMP.scala:185:8] wire _res_hit_T_78 = 1'h0; // @[PMP.scala:45:20] wire _res_hit_T_80 = 1'h0; // @[PMP.scala:46:26] wire res_hit_msbsLess_12 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_12 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_84 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_85 = 1'h0; // @[PMP.scala:83:16] wire res_hit_msbsLess_13 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_13 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_87 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_88 = 1'h0; // @[PMP.scala:83:16] wire _res_hit_T_89 = 1'h0; // @[PMP.scala:94:48] wire _res_hit_T_90 = 1'h0; // @[PMP.scala:132:61] wire res_hit_6 = 1'h0; // @[PMP.scala:132:8] wire res_ignore_6 = 1'h0; // @[PMP.scala:164:26] wire _res_aligned_straddlesLowerBound_T_118 = 1'h0; // @[PMP.scala:123:147] wire res_aligned_straddlesLowerBound_6 = 1'h0; // @[PMP.scala:123:90] wire _res_aligned_straddlesUpperBound_T_118 = 1'h0; // @[PMP.scala:124:148] wire res_aligned_straddlesUpperBound_6 = 1'h0; // 
@[PMP.scala:124:85] wire _res_aligned_rangeAligned_T_6 = 1'h0; // @[PMP.scala:125:46] wire _res_aligned_T_6 = 1'h0; // @[PMP.scala:45:20] wire _res_T_271 = 1'h0; // @[PMP.scala:168:32] wire _res_T_272 = 1'h0; // @[PMP.scala:168:32] wire _res_T_273 = 1'h0; // @[PMP.scala:168:32] wire _res_T_274 = 1'h0; // @[PMP.scala:170:30] wire _res_T_278 = 1'h0; // @[PMP.scala:174:60] wire _res_T_280 = 1'h0; // @[PMP.scala:174:60] wire _res_T_282 = 1'h0; // @[PMP.scala:174:60] wire _res_T_284 = 1'h0; // @[PMP.scala:174:60] wire _res_T_286 = 1'h0; // @[PMP.scala:174:60] wire _res_T_288 = 1'h0; // @[PMP.scala:177:30] wire _res_T_289 = 1'h0; // @[PMP.scala:177:37] wire _res_T_290 = 1'h0; // @[PMP.scala:177:61] wire _res_T_291 = 1'h0; // @[PMP.scala:177:48] wire _res_T_292 = 1'h0; // @[PMP.scala:178:32] wire _res_T_293 = 1'h0; // @[PMP.scala:178:39] wire _res_T_294 = 1'h0; // @[PMP.scala:178:63] wire _res_T_295 = 1'h0; // @[PMP.scala:178:50] wire _res_T_297 = 1'h0; // @[PMP.scala:177:30] wire _res_T_298 = 1'h0; // @[PMP.scala:177:37] wire _res_T_299 = 1'h0; // @[PMP.scala:177:61] wire _res_T_300 = 1'h0; // @[PMP.scala:177:48] wire _res_T_301 = 1'h0; // @[PMP.scala:178:32] wire _res_T_302 = 1'h0; // @[PMP.scala:178:39] wire _res_T_303 = 1'h0; // @[PMP.scala:178:63] wire _res_T_304 = 1'h0; // @[PMP.scala:178:50] wire _res_T_306 = 1'h0; // @[PMP.scala:177:30] wire _res_T_307 = 1'h0; // @[PMP.scala:177:37] wire _res_T_308 = 1'h0; // @[PMP.scala:177:61] wire _res_T_309 = 1'h0; // @[PMP.scala:177:48] wire _res_T_310 = 1'h0; // @[PMP.scala:178:32] wire _res_T_311 = 1'h0; // @[PMP.scala:178:39] wire _res_T_312 = 1'h0; // @[PMP.scala:178:63] wire _res_T_313 = 1'h0; // @[PMP.scala:178:50] wire res_cur_6_cfg_l = 1'h0; // @[PMP.scala:181:23] wire res_cur_6_cfg_x = 1'h0; // @[PMP.scala:181:23] wire res_cur_6_cfg_w = 1'h0; // @[PMP.scala:181:23] wire res_cur_6_cfg_r = 1'h0; // @[PMP.scala:181:23] wire _res_cur_cfg_r_T_12 = 1'h0; // @[PMP.scala:182:40] wire _res_cur_cfg_r_T_13 = 1'h0; // @[PMP.scala:182:26] wire _res_cur_cfg_w_T_12 = 1'h0; // @[PMP.scala:183:40] wire _res_cur_cfg_w_T_13 = 1'h0; // @[PMP.scala:183:26] wire _res_cur_cfg_x_T_12 = 1'h0; // @[PMP.scala:184:40] wire _res_cur_cfg_x_T_13 = 1'h0; // @[PMP.scala:184:26] wire _res_T_314_cfg_l = 1'h0; // @[PMP.scala:185:8] wire _res_T_314_cfg_x = 1'h0; // @[PMP.scala:185:8] wire _res_T_314_cfg_w = 1'h0; // @[PMP.scala:185:8] wire _res_T_314_cfg_r = 1'h0; // @[PMP.scala:185:8] wire _res_hit_T_91 = 1'h0; // @[PMP.scala:45:20] wire _res_hit_T_93 = 1'h0; // @[PMP.scala:46:26] wire res_hit_msbsLess_14 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_14 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_97 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_98 = 1'h0; // @[PMP.scala:83:16] wire res_hit_msbsLess_15 = 1'h0; // @[PMP.scala:80:39] wire res_hit_lsbsLess_15 = 1'h0; // @[PMP.scala:82:53] wire _res_hit_T_100 = 1'h0; // @[PMP.scala:83:30] wire _res_hit_T_101 = 1'h0; // @[PMP.scala:83:16] wire _res_hit_T_102 = 1'h0; // @[PMP.scala:94:48] wire _res_hit_T_103 = 1'h0; // @[PMP.scala:132:61] wire res_hit_7 = 1'h0; // @[PMP.scala:132:8] wire res_ignore_7 = 1'h0; // @[PMP.scala:164:26] wire _res_aligned_straddlesLowerBound_T_135 = 1'h0; // @[PMP.scala:123:147] wire res_aligned_straddlesLowerBound_7 = 1'h0; // @[PMP.scala:123:90] wire _res_aligned_straddlesUpperBound_T_135 = 1'h0; // @[PMP.scala:124:148] wire res_aligned_straddlesUpperBound_7 = 1'h0; // @[PMP.scala:124:85] wire _res_aligned_rangeAligned_T_7 = 1'h0; // @[PMP.scala:125:46] wire _res_aligned_T_7 = 1'h0; // 
@[PMP.scala:45:20] wire _res_T_316 = 1'h0; // @[PMP.scala:168:32] wire _res_T_317 = 1'h0; // @[PMP.scala:168:32] wire _res_T_318 = 1'h0; // @[PMP.scala:168:32] wire _res_T_319 = 1'h0; // @[PMP.scala:170:30] wire _res_T_323 = 1'h0; // @[PMP.scala:174:60] wire _res_T_325 = 1'h0; // @[PMP.scala:174:60] wire _res_T_327 = 1'h0; // @[PMP.scala:174:60] wire _res_T_329 = 1'h0; // @[PMP.scala:174:60] wire _res_T_331 = 1'h0; // @[PMP.scala:174:60] wire _res_T_333 = 1'h0; // @[PMP.scala:177:30] wire _res_T_334 = 1'h0; // @[PMP.scala:177:37] wire _res_T_335 = 1'h0; // @[PMP.scala:177:61] wire _res_T_336 = 1'h0; // @[PMP.scala:177:48] wire _res_T_337 = 1'h0; // @[PMP.scala:178:32] wire _res_T_338 = 1'h0; // @[PMP.scala:178:39] wire _res_T_339 = 1'h0; // @[PMP.scala:178:63] wire _res_T_340 = 1'h0; // @[PMP.scala:178:50] wire _res_T_342 = 1'h0; // @[PMP.scala:177:30] wire _res_T_343 = 1'h0; // @[PMP.scala:177:37] wire _res_T_344 = 1'h0; // @[PMP.scala:177:61] wire _res_T_345 = 1'h0; // @[PMP.scala:177:48] wire _res_T_346 = 1'h0; // @[PMP.scala:178:32] wire _res_T_347 = 1'h0; // @[PMP.scala:178:39] wire _res_T_348 = 1'h0; // @[PMP.scala:178:63] wire _res_T_349 = 1'h0; // @[PMP.scala:178:50] wire _res_T_351 = 1'h0; // @[PMP.scala:177:30] wire _res_T_352 = 1'h0; // @[PMP.scala:177:37] wire _res_T_353 = 1'h0; // @[PMP.scala:177:61] wire _res_T_354 = 1'h0; // @[PMP.scala:177:48] wire _res_T_355 = 1'h0; // @[PMP.scala:178:32] wire _res_T_356 = 1'h0; // @[PMP.scala:178:39] wire _res_T_357 = 1'h0; // @[PMP.scala:178:63] wire _res_T_358 = 1'h0; // @[PMP.scala:178:50] wire res_cur_7_cfg_l = 1'h0; // @[PMP.scala:181:23] wire res_cur_7_cfg_x = 1'h0; // @[PMP.scala:181:23] wire res_cur_7_cfg_w = 1'h0; // @[PMP.scala:181:23] wire res_cur_7_cfg_r = 1'h0; // @[PMP.scala:181:23] wire _res_cur_cfg_r_T_14 = 1'h0; // @[PMP.scala:182:40] wire _res_cur_cfg_r_T_15 = 1'h0; // @[PMP.scala:182:26] wire _res_cur_cfg_w_T_14 = 1'h0; // @[PMP.scala:183:40] wire _res_cur_cfg_w_T_15 = 1'h0; // @[PMP.scala:183:26] wire _res_cur_cfg_x_T_14 = 1'h0; // @[PMP.scala:184:40] wire _res_cur_cfg_x_T_15 = 1'h0; // @[PMP.scala:184:26] wire res_cfg_l = 1'h0; // @[PMP.scala:185:8] wire res_cfg_x = 1'h0; // @[PMP.scala:185:8] wire res_cfg_w = 1'h0; // @[PMP.scala:185:8] wire res_cfg_r = 1'h0; // @[PMP.scala:185:8] wire [1:0] io_prv = 2'h1; // @[PMP.scala:143:7, :146:14] wire [5:0] _GEN = 6'h7 << io_size_0; // @[package.scala:243:71] wire [5:0] _res_hit_lsbMask_T; // @[package.scala:243:71] assign _res_hit_lsbMask_T = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_T_3; // @[package.scala:243:71] assign _res_hit_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _res_aligned_lsbMask_T; // @[package.scala:243:71] assign _res_aligned_lsbMask_T = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_lsbMask_T_3; // @[package.scala:243:71] assign _res_hit_lsbMask_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_T_16; // @[package.scala:243:71] assign _res_hit_T_16 = _GEN; // @[package.scala:243:71] wire [5:0] _res_aligned_lsbMask_T_2; // @[package.scala:243:71] assign _res_aligned_lsbMask_T_2 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_lsbMask_T_6; // @[package.scala:243:71] assign _res_hit_lsbMask_T_6 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_T_29; // @[package.scala:243:71] assign _res_hit_T_29 = _GEN; // @[package.scala:243:71] wire [5:0] _res_aligned_lsbMask_T_4; // @[package.scala:243:71] assign _res_aligned_lsbMask_T_4 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_lsbMask_T_9; // @[package.scala:243:71] 
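// _GEN (6'h7 << io_size_0) is the size-derived low-bit mask term; the assigns
// below fan it out to the per-entry lsbMask / alignment-mask wires for all
// eight PMP entries.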
assign _res_hit_lsbMask_T_9 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_T_42; // @[package.scala:243:71] assign _res_hit_T_42 = _GEN; // @[package.scala:243:71] wire [5:0] _res_aligned_lsbMask_T_6; // @[package.scala:243:71] assign _res_aligned_lsbMask_T_6 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_lsbMask_T_12; // @[package.scala:243:71] assign _res_hit_lsbMask_T_12 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_T_55; // @[package.scala:243:71] assign _res_hit_T_55 = _GEN; // @[package.scala:243:71] wire [5:0] _res_aligned_lsbMask_T_8; // @[package.scala:243:71] assign _res_aligned_lsbMask_T_8 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_lsbMask_T_15; // @[package.scala:243:71] assign _res_hit_lsbMask_T_15 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_T_68; // @[package.scala:243:71] assign _res_hit_T_68 = _GEN; // @[package.scala:243:71] wire [5:0] _res_aligned_lsbMask_T_10; // @[package.scala:243:71] assign _res_aligned_lsbMask_T_10 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_lsbMask_T_18; // @[package.scala:243:71] assign _res_hit_lsbMask_T_18 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_T_81; // @[package.scala:243:71] assign _res_hit_T_81 = _GEN; // @[package.scala:243:71] wire [5:0] _res_aligned_lsbMask_T_12; // @[package.scala:243:71] assign _res_aligned_lsbMask_T_12 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_lsbMask_T_21; // @[package.scala:243:71] assign _res_hit_lsbMask_T_21 = _GEN; // @[package.scala:243:71] wire [5:0] _res_hit_T_94; // @[package.scala:243:71] assign _res_hit_T_94 = _GEN; // @[package.scala:243:71] wire [5:0] _res_aligned_lsbMask_T_14; // @[package.scala:243:71] assign _res_aligned_lsbMask_T_14 = _GEN; // @[package.scala:243:71] wire [2:0] _res_hit_lsbMask_T_1 = _res_hit_lsbMask_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_lsbMask_T_2 = ~_res_hit_lsbMask_T_1; // @[package.scala:243:{46,76}] wire [31:0] res_hit_lsbMask = {29'h0, _res_hit_lsbMask_T_2}; // @[package.scala:243:46] wire [28:0] _res_hit_msbMatch_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7] wire [28:0] _res_hit_msbsLess_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_hit_msbsLess_T_6 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_7 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_aligned_straddlesLowerBound_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7] wire [28:0] _res_aligned_straddlesUpperBound_T = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7] wire [28:0] _res_hit_msbMatch_T_10 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7] wire [28:0] _res_hit_msbsLess_T_12 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_14 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_hit_msbsLess_T_18 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_21 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_aligned_straddlesLowerBound_T_17 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7] wire [28:0] _res_aligned_straddlesUpperBound_T_17 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7] wire [28:0] _res_hit_msbMatch_T_20 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7] wire [28:0] _res_hit_msbsLess_T_24 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] 
_res_hit_msbsEqual_T_28 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_hit_msbsLess_T_30 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_35 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_aligned_straddlesLowerBound_T_34 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7] wire [28:0] _res_aligned_straddlesUpperBound_T_34 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7] wire [28:0] _res_hit_msbMatch_T_30 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7] wire [28:0] _res_hit_msbsLess_T_36 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_42 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_hit_msbsLess_T_42 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_49 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_aligned_straddlesLowerBound_T_51 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7] wire [28:0] _res_aligned_straddlesUpperBound_T_51 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7] wire [28:0] _res_hit_msbMatch_T_40 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7] wire [28:0] _res_hit_msbsLess_T_48 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_56 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_hit_msbsLess_T_54 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_63 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_aligned_straddlesLowerBound_T_68 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7] wire [28:0] _res_aligned_straddlesUpperBound_T_68 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7] wire [28:0] _res_hit_msbMatch_T_50 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7] wire [28:0] _res_hit_msbsLess_T_60 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_70 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_hit_msbsLess_T_66 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_77 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_aligned_straddlesLowerBound_T_85 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7] wire [28:0] _res_aligned_straddlesUpperBound_T_85 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7] wire [28:0] _res_hit_msbMatch_T_60 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7] wire [28:0] _res_hit_msbsLess_T_72 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_84 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_hit_msbsLess_T_78 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_91 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_aligned_straddlesLowerBound_T_102 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7] wire [28:0] _res_aligned_straddlesUpperBound_T_102 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7] wire [28:0] _res_hit_msbMatch_T_70 = io_addr_0[31:3]; // @[PMP.scala:69:29, :143:7] wire [28:0] _res_hit_msbsLess_T_84 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] _res_hit_msbsEqual_T_98 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_hit_msbsLess_T_90 = io_addr_0[31:3]; // @[PMP.scala:69:29, :80:25, :143:7] wire [28:0] 
_res_hit_msbsEqual_T_105 = io_addr_0[31:3]; // @[PMP.scala:69:29, :81:27, :143:7] wire [28:0] _res_aligned_straddlesLowerBound_T_119 = io_addr_0[31:3]; // @[PMP.scala:69:29, :123:35, :143:7] wire [28:0] _res_aligned_straddlesUpperBound_T_119 = io_addr_0[31:3]; // @[PMP.scala:69:29, :124:35, :143:7] wire [28:0] _res_hit_msbMatch_T_7 = _res_hit_msbMatch_T; // @[PMP.scala:63:47, :69:29] wire [28:0] _res_hit_msbMatch_T_9 = _res_hit_msbMatch_T_7; // @[PMP.scala:63:{47,52}] wire res_hit_msbMatch = _res_hit_msbMatch_T_9 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [2:0] _res_hit_lsbMatch_T = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7] wire [2:0] _res_hit_lsbsLess_T = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_hit_lsbsLess_T_7 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_aligned_straddlesLowerBound_T_13 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7] wire [2:0] _res_aligned_straddlesUpperBound_T_13 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7] wire [2:0] _res_hit_lsbMatch_T_10 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7] wire [2:0] _res_hit_lsbsLess_T_14 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_hit_lsbsLess_T_21 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_aligned_straddlesLowerBound_T_30 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7] wire [2:0] _res_aligned_straddlesUpperBound_T_30 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7] wire [2:0] _res_hit_lsbMatch_T_20 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7] wire [2:0] _res_hit_lsbsLess_T_28 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_hit_lsbsLess_T_35 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_aligned_straddlesLowerBound_T_47 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7] wire [2:0] _res_aligned_straddlesUpperBound_T_47 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7] wire [2:0] _res_hit_lsbMatch_T_30 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7] wire [2:0] _res_hit_lsbsLess_T_42 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_hit_lsbsLess_T_49 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_aligned_straddlesLowerBound_T_64 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7] wire [2:0] _res_aligned_straddlesUpperBound_T_64 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7] wire [2:0] _res_hit_lsbMatch_T_40 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7] wire [2:0] _res_hit_lsbsLess_T_56 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_hit_lsbsLess_T_63 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_aligned_straddlesLowerBound_T_81 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7] wire [2:0] _res_aligned_straddlesUpperBound_T_81 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7] wire [2:0] _res_hit_lsbMatch_T_50 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7] wire [2:0] _res_hit_lsbsLess_T_70 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_hit_lsbsLess_T_77 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_aligned_straddlesLowerBound_T_98 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7] wire [2:0] _res_aligned_straddlesUpperBound_T_98 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7] wire [2:0] _res_hit_lsbMatch_T_60 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7] wire [2:0] 
_res_hit_lsbsLess_T_84 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_hit_lsbsLess_T_91 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_aligned_straddlesLowerBound_T_115 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7] wire [2:0] _res_aligned_straddlesUpperBound_T_115 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7] wire [2:0] _res_hit_lsbMatch_T_70 = io_addr_0[2:0]; // @[PMP.scala:70:28, :143:7] wire [2:0] _res_hit_lsbsLess_T_98 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_hit_lsbsLess_T_105 = io_addr_0[2:0]; // @[PMP.scala:70:28, :82:25, :143:7] wire [2:0] _res_aligned_straddlesLowerBound_T_132 = io_addr_0[2:0]; // @[PMP.scala:70:28, :123:129, :143:7] wire [2:0] _res_aligned_straddlesUpperBound_T_132 = io_addr_0[2:0]; // @[PMP.scala:70:28, :124:119, :143:7] wire [2:0] _res_hit_lsbMatch_T_7 = _res_hit_lsbMatch_T; // @[PMP.scala:63:47, :70:28] wire [2:0] _res_hit_lsbMatch_T_6 = res_hit_lsbMask[2:0]; // @[PMP.scala:68:26, :70:80] wire [2:0] _res_hit_lsbMatch_T_8 = ~_res_hit_lsbMatch_T_6; // @[PMP.scala:63:54, :70:80] wire [2:0] _res_hit_lsbMatch_T_9 = _res_hit_lsbMatch_T_7 & _res_hit_lsbMatch_T_8; // @[PMP.scala:63:{47,52,54}] wire res_hit_lsbMatch = _res_hit_lsbMatch_T_9 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire _res_hit_T_1 = res_hit_msbMatch & res_hit_lsbMatch; // @[PMP.scala:63:58, :71:16] wire [2:0] _res_hit_T_4 = _res_hit_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_T_5 = ~_res_hit_T_4; // @[package.scala:243:{46,76}] wire [28:0] _res_hit_msbsEqual_T_6 = _res_hit_msbsEqual_T; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual = _res_hit_msbsEqual_T_6 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_1 = _res_hit_lsbsLess_T | _res_hit_T_5; // @[package.scala:243:46] wire [28:0] _res_hit_msbsEqual_T_13 = _res_hit_msbsEqual_T_7; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_1 = _res_hit_msbsEqual_T_13 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_8 = _res_hit_lsbsLess_T_7; // @[PMP.scala:82:{25,42}] wire [2:0] _res_aligned_lsbMask_T_1 = _res_aligned_lsbMask_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] res_aligned_lsbMask = ~_res_aligned_lsbMask_T_1; // @[package.scala:243:{46,76}] wire [2:0] _res_aligned_pow2Aligned_T_2 = res_aligned_lsbMask; // @[package.scala:243:46] wire [28:0] _res_aligned_straddlesLowerBound_T_6 = _res_aligned_straddlesLowerBound_T; // @[PMP.scala:123:{35,49}] wire _res_aligned_straddlesLowerBound_T_7 = _res_aligned_straddlesLowerBound_T_6 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62] wire [2:0] _res_aligned_straddlesLowerBound_T_14 = ~_res_aligned_straddlesLowerBound_T_13; // @[PMP.scala:123:{127,129}] wire [28:0] _res_aligned_straddlesUpperBound_T_6 = _res_aligned_straddlesUpperBound_T; // @[PMP.scala:124:{35,49}] wire _res_aligned_straddlesUpperBound_T_7 = _res_aligned_straddlesUpperBound_T_6 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}] wire [2:0] _res_aligned_straddlesUpperBound_T_14 = _res_aligned_straddlesUpperBound_T_13 | res_aligned_lsbMask; // @[package.scala:243:46] wire res_aligned_pow2Aligned = _res_aligned_pow2Aligned_T_2 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26] wire [2:0] 
_res_hit_lsbMask_T_4 = _res_hit_lsbMask_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_lsbMask_T_5 = ~_res_hit_lsbMask_T_4; // @[package.scala:243:{46,76}] wire [31:0] res_hit_lsbMask_1 = {29'h0, _res_hit_lsbMask_T_5}; // @[package.scala:243:46] wire [28:0] _res_hit_msbMatch_T_17 = _res_hit_msbMatch_T_10; // @[PMP.scala:63:47, :69:29] wire [28:0] _res_hit_msbMatch_T_19 = _res_hit_msbMatch_T_17; // @[PMP.scala:63:{47,52}] wire res_hit_msbMatch_1 = _res_hit_msbMatch_T_19 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [2:0] _res_hit_lsbMatch_T_17 = _res_hit_lsbMatch_T_10; // @[PMP.scala:63:47, :70:28] wire [2:0] _res_hit_lsbMatch_T_16 = res_hit_lsbMask_1[2:0]; // @[PMP.scala:68:26, :70:80] wire [2:0] _res_hit_lsbMatch_T_18 = ~_res_hit_lsbMatch_T_16; // @[PMP.scala:63:54, :70:80] wire [2:0] _res_hit_lsbMatch_T_19 = _res_hit_lsbMatch_T_17 & _res_hit_lsbMatch_T_18; // @[PMP.scala:63:{47,52,54}] wire res_hit_lsbMatch_1 = _res_hit_lsbMatch_T_19 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire _res_hit_T_14 = res_hit_msbMatch_1 & res_hit_lsbMatch_1; // @[PMP.scala:63:58, :71:16] wire [2:0] _res_hit_T_17 = _res_hit_T_16[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_T_18 = ~_res_hit_T_17; // @[package.scala:243:{46,76}] wire [28:0] _res_hit_msbsEqual_T_20 = _res_hit_msbsEqual_T_14; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_2 = _res_hit_msbsEqual_T_20 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_15 = _res_hit_lsbsLess_T_14 | _res_hit_T_18; // @[package.scala:243:46] wire [28:0] _res_hit_msbsEqual_T_27 = _res_hit_msbsEqual_T_21; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_3 = _res_hit_msbsEqual_T_27 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_22 = _res_hit_lsbsLess_T_21; // @[PMP.scala:82:{25,42}] wire [2:0] _res_aligned_lsbMask_T_3 = _res_aligned_lsbMask_T_2[2:0]; // @[package.scala:243:{71,76}] wire [2:0] res_aligned_lsbMask_1 = ~_res_aligned_lsbMask_T_3; // @[package.scala:243:{46,76}] wire [2:0] _res_aligned_pow2Aligned_T_5 = res_aligned_lsbMask_1; // @[package.scala:243:46] wire [28:0] _res_aligned_straddlesLowerBound_T_23 = _res_aligned_straddlesLowerBound_T_17; // @[PMP.scala:123:{35,49}] wire _res_aligned_straddlesLowerBound_T_24 = _res_aligned_straddlesLowerBound_T_23 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62] wire [2:0] _res_aligned_straddlesLowerBound_T_31 = ~_res_aligned_straddlesLowerBound_T_30; // @[PMP.scala:123:{127,129}] wire [28:0] _res_aligned_straddlesUpperBound_T_23 = _res_aligned_straddlesUpperBound_T_17; // @[PMP.scala:124:{35,49}] wire _res_aligned_straddlesUpperBound_T_24 = _res_aligned_straddlesUpperBound_T_23 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}] wire [2:0] _res_aligned_straddlesUpperBound_T_31 = _res_aligned_straddlesUpperBound_T_30 | res_aligned_lsbMask_1; // @[package.scala:243:46] wire res_aligned_pow2Aligned_1 = _res_aligned_pow2Aligned_T_5 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26] wire [2:0] _res_hit_lsbMask_T_7 = _res_hit_lsbMask_T_6[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_lsbMask_T_8 = ~_res_hit_lsbMask_T_7; // @[package.scala:243:{46,76}] wire [31:0] res_hit_lsbMask_2 = {29'h0, _res_hit_lsbMask_T_8}; 
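// Per-entry hit structure: msbMatch/lsbMatch compare io_addr against the
// (constant-zero) PMP address under the size mask, msbsEqual/lsbsLess feed the
// TOR range comparison, and the straddle/pow2Aligned terms check request
// alignment. The same pattern repeats for each remaining PMP entry below.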
// @[package.scala:243:46] wire [28:0] _res_hit_msbMatch_T_27 = _res_hit_msbMatch_T_20; // @[PMP.scala:63:47, :69:29] wire [28:0] _res_hit_msbMatch_T_29 = _res_hit_msbMatch_T_27; // @[PMP.scala:63:{47,52}] wire res_hit_msbMatch_2 = _res_hit_msbMatch_T_29 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [2:0] _res_hit_lsbMatch_T_27 = _res_hit_lsbMatch_T_20; // @[PMP.scala:63:47, :70:28] wire [2:0] _res_hit_lsbMatch_T_26 = res_hit_lsbMask_2[2:0]; // @[PMP.scala:68:26, :70:80] wire [2:0] _res_hit_lsbMatch_T_28 = ~_res_hit_lsbMatch_T_26; // @[PMP.scala:63:54, :70:80] wire [2:0] _res_hit_lsbMatch_T_29 = _res_hit_lsbMatch_T_27 & _res_hit_lsbMatch_T_28; // @[PMP.scala:63:{47,52,54}] wire res_hit_lsbMatch_2 = _res_hit_lsbMatch_T_29 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire _res_hit_T_27 = res_hit_msbMatch_2 & res_hit_lsbMatch_2; // @[PMP.scala:63:58, :71:16] wire [2:0] _res_hit_T_30 = _res_hit_T_29[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_T_31 = ~_res_hit_T_30; // @[package.scala:243:{46,76}] wire [28:0] _res_hit_msbsEqual_T_34 = _res_hit_msbsEqual_T_28; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_4 = _res_hit_msbsEqual_T_34 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_29 = _res_hit_lsbsLess_T_28 | _res_hit_T_31; // @[package.scala:243:46] wire [28:0] _res_hit_msbsEqual_T_41 = _res_hit_msbsEqual_T_35; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_5 = _res_hit_msbsEqual_T_41 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_36 = _res_hit_lsbsLess_T_35; // @[PMP.scala:82:{25,42}] wire [2:0] _res_aligned_lsbMask_T_5 = _res_aligned_lsbMask_T_4[2:0]; // @[package.scala:243:{71,76}] wire [2:0] res_aligned_lsbMask_2 = ~_res_aligned_lsbMask_T_5; // @[package.scala:243:{46,76}] wire [2:0] _res_aligned_pow2Aligned_T_8 = res_aligned_lsbMask_2; // @[package.scala:243:46] wire [28:0] _res_aligned_straddlesLowerBound_T_40 = _res_aligned_straddlesLowerBound_T_34; // @[PMP.scala:123:{35,49}] wire _res_aligned_straddlesLowerBound_T_41 = _res_aligned_straddlesLowerBound_T_40 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62] wire [2:0] _res_aligned_straddlesLowerBound_T_48 = ~_res_aligned_straddlesLowerBound_T_47; // @[PMP.scala:123:{127,129}] wire [28:0] _res_aligned_straddlesUpperBound_T_40 = _res_aligned_straddlesUpperBound_T_34; // @[PMP.scala:124:{35,49}] wire _res_aligned_straddlesUpperBound_T_41 = _res_aligned_straddlesUpperBound_T_40 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}] wire [2:0] _res_aligned_straddlesUpperBound_T_48 = _res_aligned_straddlesUpperBound_T_47 | res_aligned_lsbMask_2; // @[package.scala:243:46] wire res_aligned_pow2Aligned_2 = _res_aligned_pow2Aligned_T_8 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26] wire [2:0] _res_hit_lsbMask_T_10 = _res_hit_lsbMask_T_9[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_lsbMask_T_11 = ~_res_hit_lsbMask_T_10; // @[package.scala:243:{46,76}] wire [31:0] res_hit_lsbMask_3 = {29'h0, _res_hit_lsbMask_T_11}; // @[package.scala:243:46] wire [28:0] _res_hit_msbMatch_T_37 = _res_hit_msbMatch_T_30; // @[PMP.scala:63:47, :69:29] wire [28:0] _res_hit_msbMatch_T_39 = _res_hit_msbMatch_T_37; // @[PMP.scala:63:{47,52}] wire res_hit_msbMatch_3 
= _res_hit_msbMatch_T_39 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [2:0] _res_hit_lsbMatch_T_37 = _res_hit_lsbMatch_T_30; // @[PMP.scala:63:47, :70:28] wire [2:0] _res_hit_lsbMatch_T_36 = res_hit_lsbMask_3[2:0]; // @[PMP.scala:68:26, :70:80] wire [2:0] _res_hit_lsbMatch_T_38 = ~_res_hit_lsbMatch_T_36; // @[PMP.scala:63:54, :70:80] wire [2:0] _res_hit_lsbMatch_T_39 = _res_hit_lsbMatch_T_37 & _res_hit_lsbMatch_T_38; // @[PMP.scala:63:{47,52,54}] wire res_hit_lsbMatch_3 = _res_hit_lsbMatch_T_39 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire _res_hit_T_40 = res_hit_msbMatch_3 & res_hit_lsbMatch_3; // @[PMP.scala:63:58, :71:16] wire [2:0] _res_hit_T_43 = _res_hit_T_42[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_T_44 = ~_res_hit_T_43; // @[package.scala:243:{46,76}] wire [28:0] _res_hit_msbsEqual_T_48 = _res_hit_msbsEqual_T_42; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_6 = _res_hit_msbsEqual_T_48 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_43 = _res_hit_lsbsLess_T_42 | _res_hit_T_44; // @[package.scala:243:46] wire [28:0] _res_hit_msbsEqual_T_55 = _res_hit_msbsEqual_T_49; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_7 = _res_hit_msbsEqual_T_55 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_50 = _res_hit_lsbsLess_T_49; // @[PMP.scala:82:{25,42}] wire [2:0] _res_aligned_lsbMask_T_7 = _res_aligned_lsbMask_T_6[2:0]; // @[package.scala:243:{71,76}] wire [2:0] res_aligned_lsbMask_3 = ~_res_aligned_lsbMask_T_7; // @[package.scala:243:{46,76}] wire [2:0] _res_aligned_pow2Aligned_T_11 = res_aligned_lsbMask_3; // @[package.scala:243:46] wire [28:0] _res_aligned_straddlesLowerBound_T_57 = _res_aligned_straddlesLowerBound_T_51; // @[PMP.scala:123:{35,49}] wire _res_aligned_straddlesLowerBound_T_58 = _res_aligned_straddlesLowerBound_T_57 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62] wire [2:0] _res_aligned_straddlesLowerBound_T_65 = ~_res_aligned_straddlesLowerBound_T_64; // @[PMP.scala:123:{127,129}] wire [28:0] _res_aligned_straddlesUpperBound_T_57 = _res_aligned_straddlesUpperBound_T_51; // @[PMP.scala:124:{35,49}] wire _res_aligned_straddlesUpperBound_T_58 = _res_aligned_straddlesUpperBound_T_57 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}] wire [2:0] _res_aligned_straddlesUpperBound_T_65 = _res_aligned_straddlesUpperBound_T_64 | res_aligned_lsbMask_3; // @[package.scala:243:46] wire res_aligned_pow2Aligned_3 = _res_aligned_pow2Aligned_T_11 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26] wire [2:0] _res_hit_lsbMask_T_13 = _res_hit_lsbMask_T_12[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_lsbMask_T_14 = ~_res_hit_lsbMask_T_13; // @[package.scala:243:{46,76}] wire [31:0] res_hit_lsbMask_4 = {29'h0, _res_hit_lsbMask_T_14}; // @[package.scala:243:46] wire [28:0] _res_hit_msbMatch_T_47 = _res_hit_msbMatch_T_40; // @[PMP.scala:63:47, :69:29] wire [28:0] _res_hit_msbMatch_T_49 = _res_hit_msbMatch_T_47; // @[PMP.scala:63:{47,52}] wire res_hit_msbMatch_4 = _res_hit_msbMatch_T_49 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [2:0] _res_hit_lsbMatch_T_47 = _res_hit_lsbMatch_T_40; // @[PMP.scala:63:47, :70:28] wire [2:0] 
_res_hit_lsbMatch_T_46 = res_hit_lsbMask_4[2:0]; // @[PMP.scala:68:26, :70:80] wire [2:0] _res_hit_lsbMatch_T_48 = ~_res_hit_lsbMatch_T_46; // @[PMP.scala:63:54, :70:80] wire [2:0] _res_hit_lsbMatch_T_49 = _res_hit_lsbMatch_T_47 & _res_hit_lsbMatch_T_48; // @[PMP.scala:63:{47,52,54}] wire res_hit_lsbMatch_4 = _res_hit_lsbMatch_T_49 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire _res_hit_T_53 = res_hit_msbMatch_4 & res_hit_lsbMatch_4; // @[PMP.scala:63:58, :71:16] wire [2:0] _res_hit_T_56 = _res_hit_T_55[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_T_57 = ~_res_hit_T_56; // @[package.scala:243:{46,76}] wire [28:0] _res_hit_msbsEqual_T_62 = _res_hit_msbsEqual_T_56; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_8 = _res_hit_msbsEqual_T_62 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_57 = _res_hit_lsbsLess_T_56 | _res_hit_T_57; // @[package.scala:243:46] wire [28:0] _res_hit_msbsEqual_T_69 = _res_hit_msbsEqual_T_63; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_9 = _res_hit_msbsEqual_T_69 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_64 = _res_hit_lsbsLess_T_63; // @[PMP.scala:82:{25,42}] wire [2:0] _res_aligned_lsbMask_T_9 = _res_aligned_lsbMask_T_8[2:0]; // @[package.scala:243:{71,76}] wire [2:0] res_aligned_lsbMask_4 = ~_res_aligned_lsbMask_T_9; // @[package.scala:243:{46,76}] wire [2:0] _res_aligned_pow2Aligned_T_14 = res_aligned_lsbMask_4; // @[package.scala:243:46] wire [28:0] _res_aligned_straddlesLowerBound_T_74 = _res_aligned_straddlesLowerBound_T_68; // @[PMP.scala:123:{35,49}] wire _res_aligned_straddlesLowerBound_T_75 = _res_aligned_straddlesLowerBound_T_74 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62] wire [2:0] _res_aligned_straddlesLowerBound_T_82 = ~_res_aligned_straddlesLowerBound_T_81; // @[PMP.scala:123:{127,129}] wire [28:0] _res_aligned_straddlesUpperBound_T_74 = _res_aligned_straddlesUpperBound_T_68; // @[PMP.scala:124:{35,49}] wire _res_aligned_straddlesUpperBound_T_75 = _res_aligned_straddlesUpperBound_T_74 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}] wire [2:0] _res_aligned_straddlesUpperBound_T_82 = _res_aligned_straddlesUpperBound_T_81 | res_aligned_lsbMask_4; // @[package.scala:243:46] wire res_aligned_pow2Aligned_4 = _res_aligned_pow2Aligned_T_14 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26] wire [2:0] _res_hit_lsbMask_T_16 = _res_hit_lsbMask_T_15[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_lsbMask_T_17 = ~_res_hit_lsbMask_T_16; // @[package.scala:243:{46,76}] wire [31:0] res_hit_lsbMask_5 = {29'h0, _res_hit_lsbMask_T_17}; // @[package.scala:243:46] wire [28:0] _res_hit_msbMatch_T_57 = _res_hit_msbMatch_T_50; // @[PMP.scala:63:47, :69:29] wire [28:0] _res_hit_msbMatch_T_59 = _res_hit_msbMatch_T_57; // @[PMP.scala:63:{47,52}] wire res_hit_msbMatch_5 = _res_hit_msbMatch_T_59 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [2:0] _res_hit_lsbMatch_T_57 = _res_hit_lsbMatch_T_50; // @[PMP.scala:63:47, :70:28] wire [2:0] _res_hit_lsbMatch_T_56 = res_hit_lsbMask_5[2:0]; // @[PMP.scala:68:26, :70:80] wire [2:0] _res_hit_lsbMatch_T_58 = ~_res_hit_lsbMatch_T_56; // @[PMP.scala:63:54, :70:80] wire [2:0] _res_hit_lsbMatch_T_59 = 
_res_hit_lsbMatch_T_57 & _res_hit_lsbMatch_T_58; // @[PMP.scala:63:{47,52,54}] wire res_hit_lsbMatch_5 = _res_hit_lsbMatch_T_59 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire _res_hit_T_66 = res_hit_msbMatch_5 & res_hit_lsbMatch_5; // @[PMP.scala:63:58, :71:16] wire [2:0] _res_hit_T_69 = _res_hit_T_68[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_T_70 = ~_res_hit_T_69; // @[package.scala:243:{46,76}] wire [28:0] _res_hit_msbsEqual_T_76 = _res_hit_msbsEqual_T_70; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_10 = _res_hit_msbsEqual_T_76 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_71 = _res_hit_lsbsLess_T_70 | _res_hit_T_70; // @[package.scala:243:46] wire [28:0] _res_hit_msbsEqual_T_83 = _res_hit_msbsEqual_T_77; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_11 = _res_hit_msbsEqual_T_83 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_78 = _res_hit_lsbsLess_T_77; // @[PMP.scala:82:{25,42}] wire [2:0] _res_aligned_lsbMask_T_11 = _res_aligned_lsbMask_T_10[2:0]; // @[package.scala:243:{71,76}] wire [2:0] res_aligned_lsbMask_5 = ~_res_aligned_lsbMask_T_11; // @[package.scala:243:{46,76}] wire [2:0] _res_aligned_pow2Aligned_T_17 = res_aligned_lsbMask_5; // @[package.scala:243:46] wire [28:0] _res_aligned_straddlesLowerBound_T_91 = _res_aligned_straddlesLowerBound_T_85; // @[PMP.scala:123:{35,49}] wire _res_aligned_straddlesLowerBound_T_92 = _res_aligned_straddlesLowerBound_T_91 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62] wire [2:0] _res_aligned_straddlesLowerBound_T_99 = ~_res_aligned_straddlesLowerBound_T_98; // @[PMP.scala:123:{127,129}] wire [28:0] _res_aligned_straddlesUpperBound_T_91 = _res_aligned_straddlesUpperBound_T_85; // @[PMP.scala:124:{35,49}] wire _res_aligned_straddlesUpperBound_T_92 = _res_aligned_straddlesUpperBound_T_91 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}] wire [2:0] _res_aligned_straddlesUpperBound_T_99 = _res_aligned_straddlesUpperBound_T_98 | res_aligned_lsbMask_5; // @[package.scala:243:46] wire res_aligned_pow2Aligned_5 = _res_aligned_pow2Aligned_T_17 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26] wire [2:0] _res_hit_lsbMask_T_19 = _res_hit_lsbMask_T_18[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_lsbMask_T_20 = ~_res_hit_lsbMask_T_19; // @[package.scala:243:{46,76}] wire [31:0] res_hit_lsbMask_6 = {29'h0, _res_hit_lsbMask_T_20}; // @[package.scala:243:46] wire [28:0] _res_hit_msbMatch_T_67 = _res_hit_msbMatch_T_60; // @[PMP.scala:63:47, :69:29] wire [28:0] _res_hit_msbMatch_T_69 = _res_hit_msbMatch_T_67; // @[PMP.scala:63:{47,52}] wire res_hit_msbMatch_6 = _res_hit_msbMatch_T_69 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [2:0] _res_hit_lsbMatch_T_67 = _res_hit_lsbMatch_T_60; // @[PMP.scala:63:47, :70:28] wire [2:0] _res_hit_lsbMatch_T_66 = res_hit_lsbMask_6[2:0]; // @[PMP.scala:68:26, :70:80] wire [2:0] _res_hit_lsbMatch_T_68 = ~_res_hit_lsbMatch_T_66; // @[PMP.scala:63:54, :70:80] wire [2:0] _res_hit_lsbMatch_T_69 = _res_hit_lsbMatch_T_67 & _res_hit_lsbMatch_T_68; // @[PMP.scala:63:{47,52,54}] wire res_hit_lsbMatch_6 = _res_hit_lsbMatch_T_69 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, 
:126:39, :174:26] wire _res_hit_T_79 = res_hit_msbMatch_6 & res_hit_lsbMatch_6; // @[PMP.scala:63:58, :71:16] wire [2:0] _res_hit_T_82 = _res_hit_T_81[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_T_83 = ~_res_hit_T_82; // @[package.scala:243:{46,76}] wire [28:0] _res_hit_msbsEqual_T_90 = _res_hit_msbsEqual_T_84; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_12 = _res_hit_msbsEqual_T_90 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_85 = _res_hit_lsbsLess_T_84 | _res_hit_T_83; // @[package.scala:243:46] wire [28:0] _res_hit_msbsEqual_T_97 = _res_hit_msbsEqual_T_91; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_13 = _res_hit_msbsEqual_T_97 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_92 = _res_hit_lsbsLess_T_91; // @[PMP.scala:82:{25,42}] wire [2:0] _res_aligned_lsbMask_T_13 = _res_aligned_lsbMask_T_12[2:0]; // @[package.scala:243:{71,76}] wire [2:0] res_aligned_lsbMask_6 = ~_res_aligned_lsbMask_T_13; // @[package.scala:243:{46,76}] wire [2:0] _res_aligned_pow2Aligned_T_20 = res_aligned_lsbMask_6; // @[package.scala:243:46] wire [28:0] _res_aligned_straddlesLowerBound_T_108 = _res_aligned_straddlesLowerBound_T_102; // @[PMP.scala:123:{35,49}] wire _res_aligned_straddlesLowerBound_T_109 = _res_aligned_straddlesLowerBound_T_108 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62] wire [2:0] _res_aligned_straddlesLowerBound_T_116 = ~_res_aligned_straddlesLowerBound_T_115; // @[PMP.scala:123:{127,129}] wire [28:0] _res_aligned_straddlesUpperBound_T_108 = _res_aligned_straddlesUpperBound_T_102; // @[PMP.scala:124:{35,49}] wire _res_aligned_straddlesUpperBound_T_109 = _res_aligned_straddlesUpperBound_T_108 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}] wire [2:0] _res_aligned_straddlesUpperBound_T_116 = _res_aligned_straddlesUpperBound_T_115 | res_aligned_lsbMask_6; // @[package.scala:243:46] wire res_aligned_pow2Aligned_6 = _res_aligned_pow2Aligned_T_20 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26] wire [2:0] _res_hit_lsbMask_T_22 = _res_hit_lsbMask_T_21[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _res_hit_lsbMask_T_23 = ~_res_hit_lsbMask_T_22; // @[package.scala:243:{46,76}] wire [31:0] res_hit_lsbMask_7 = {29'h0, _res_hit_lsbMask_T_23}; // @[package.scala:243:46] wire [28:0] _res_hit_msbMatch_T_77 = _res_hit_msbMatch_T_70; // @[PMP.scala:63:47, :69:29] wire [28:0] _res_hit_msbMatch_T_79 = _res_hit_msbMatch_T_77; // @[PMP.scala:63:{47,52}] wire res_hit_msbMatch_7 = _res_hit_msbMatch_T_79 == 29'h0; // @[PMP.scala:63:{52,58}, :68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:62] wire [2:0] _res_hit_lsbMatch_T_77 = _res_hit_lsbMatch_T_70; // @[PMP.scala:63:47, :70:28] wire [2:0] _res_hit_lsbMatch_T_76 = res_hit_lsbMask_7[2:0]; // @[PMP.scala:68:26, :70:80] wire [2:0] _res_hit_lsbMatch_T_78 = ~_res_hit_lsbMatch_T_76; // @[PMP.scala:63:54, :70:80] wire [2:0] _res_hit_lsbMatch_T_79 = _res_hit_lsbMatch_T_77 & _res_hit_lsbMatch_T_78; // @[PMP.scala:63:{47,52,54}] wire res_hit_lsbMatch_7 = _res_hit_lsbMatch_T_79 == 3'h0; // @[PMP.scala:63:{52,58}, :68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:39, :174:26] wire _res_hit_T_92 = res_hit_msbMatch_7 & res_hit_lsbMatch_7; // @[PMP.scala:63:58, :71:16] wire [2:0] _res_hit_T_95 = _res_hit_T_94[2:0]; // @[package.scala:243:{71,76}] wire [2:0] 
_res_hit_T_96 = ~_res_hit_T_95; // @[package.scala:243:{46,76}] wire [28:0] _res_hit_msbsEqual_T_104 = _res_hit_msbsEqual_T_98; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_14 = _res_hit_msbsEqual_T_104 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_99 = _res_hit_lsbsLess_T_98 | _res_hit_T_96; // @[package.scala:243:46] wire [28:0] _res_hit_msbsEqual_T_111 = _res_hit_msbsEqual_T_105; // @[PMP.scala:81:{27,41}] wire res_hit_msbsEqual_15 = _res_hit_msbsEqual_T_111 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:{41,54,69}, :123:67, :124:62] wire [2:0] _res_hit_lsbsLess_T_106 = _res_hit_lsbsLess_T_105; // @[PMP.scala:82:{25,42}] wire [2:0] _res_aligned_lsbMask_T_15 = _res_aligned_lsbMask_T_14[2:0]; // @[package.scala:243:{71,76}] wire [2:0] res_aligned_lsbMask_7 = ~_res_aligned_lsbMask_T_15; // @[package.scala:243:{46,76}] wire [2:0] _res_aligned_pow2Aligned_T_23 = res_aligned_lsbMask_7; // @[package.scala:243:46] wire [28:0] _res_aligned_straddlesLowerBound_T_125 = _res_aligned_straddlesLowerBound_T_119; // @[PMP.scala:123:{35,49}] wire _res_aligned_straddlesLowerBound_T_126 = _res_aligned_straddlesLowerBound_T_125 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:{49,67,82}, :124:62] wire [2:0] _res_aligned_straddlesLowerBound_T_133 = ~_res_aligned_straddlesLowerBound_T_132; // @[PMP.scala:123:{127,129}] wire [28:0] _res_aligned_straddlesUpperBound_T_125 = _res_aligned_straddlesUpperBound_T_119; // @[PMP.scala:124:{35,49}] wire _res_aligned_straddlesUpperBound_T_126 = _res_aligned_straddlesUpperBound_T_125 == 29'h0; // @[PMP.scala:68:26, :69:{53,72}, :80:52, :81:54, :123:67, :124:{49,62,77}] wire [2:0] _res_aligned_straddlesUpperBound_T_133 = _res_aligned_straddlesUpperBound_T_132 | res_aligned_lsbMask_7; // @[package.scala:243:46] wire res_aligned_pow2Aligned_7 = _res_aligned_pow2Aligned_T_23 == 3'h0; // @[PMP.scala:68:26, :70:55, :82:64, :123:{108,125}, :124:{98,115}, :126:{32,39,57}, :174:26] endmodule
Generate the Verilog code corresponding to the following Chisel files. File RouteComputer.scala: package constellation.router import chisel3._ import chisel3.util._ import chisel3.util.experimental.decode.{TruthTable, decoder} import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import freechips.rocketchip.rocket.DecodeLogic import constellation.channel._ import constellation.routing.{FlowRoutingBundle, FlowRoutingInfo} import constellation.noc.{HasNoCParams} class RouteComputerReq(implicit val p: Parameters) extends Bundle with HasNoCParams { val src_virt_id = UInt(virtualChannelBits.W) val flow = new FlowRoutingBundle } class RouteComputerResp( val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) }) } class RouteComputer( val routerParams: RouterParams, val inParams: Seq[ChannelParams], val outParams: Seq[ChannelParams], val ingressParams: Seq[IngressChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterParams with HasRouterInputParams with HasRouterOutputParams with HasNoCParams { val io = IO(new Bundle { val req = MixedVec(allInParams.map { u => Flipped(Decoupled(new RouteComputerReq)) }) val resp = MixedVec(allInParams.map { u => Output(new RouteComputerResp(outParams, egressParams)) }) }) (io.req zip io.resp).zipWithIndex.map { case ((req, resp), i) => req.ready := true.B if (outParams.size == 0) { assert(!req.valid) resp.vc_sel := DontCare } else { def toUInt(t: (Int, FlowRoutingInfo)): UInt = { val l2 = (BigInt(t._1) << req.bits.flow.vnet_id .getWidth) | t._2.vNetId val l3 = ( l2 << req.bits.flow.ingress_node .getWidth) | t._2.ingressNode val l4 = ( l3 << req.bits.flow.ingress_node_id.getWidth) | t._2.ingressNodeId val l5 = ( l4 << req.bits.flow.egress_node .getWidth) | t._2.egressNode val l6 = ( l5 << req.bits.flow.egress_node_id .getWidth) | t._2.egressNodeId l6.U(req.bits.getWidth.W) } val flow = req.bits.flow val table = allInParams(i).possibleFlows.toSeq.distinct.map { pI => allInParams(i).channelRoutingInfos.map { cI => var row: String = "b" (0 until nOutputs).foreach { o => (0 until outParams(o).nVirtualChannels).foreach { outVId => row = row + (if (routingRelation(cI, outParams(o).channelRoutingInfos(outVId), pI)) "1" else "0") } } ((cI.vc, pI), row) } }.flatten val addr = req.bits.asUInt val width = outParams.map(_.nVirtualChannels).reduce(_+_) val decoded = if (table.size > 0) { val truthTable = TruthTable( table.map { e => (BitPat(toUInt(e._1)), BitPat(e._2)) }, BitPat("b" + "?" * width) ) Reverse(decoder(addr, truthTable)) } else { 0.U(width.W) } var idx = 0 (0 until nAllOutputs).foreach { o => if (o < nOutputs) { (0 until outParams(o).nVirtualChannels).foreach { outVId => resp.vc_sel(o)(outVId) := decoded(idx) idx += 1 } } else { resp.vc_sel(o)(0) := false.B } } } } }
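For reference, a minimal self-contained sketch (not part of the generated output; the module and signal names below are hypothetical) of how the `TruthTable`/`decoder` pair from `chisel3.util.experimental.decode`, which `RouteComputer` uses above, turns a static table of input/output bit patterns into combinational logic:

import chisel3._
import chisel3.util._
import chisel3.util.experimental.decode.{TruthTable, decoder}

class TinyRouteDecoder extends Module {
  val io = IO(new Bundle {
    val addr = Input(UInt(2.W))   // stands in for the packed request bits
    val sel  = Output(UInt(3.W))  // stands in for the per-VC select vector
  })
  // Each (input pattern -> output pattern) entry plays the role of one
  // (channel VC, flow) row in RouteComputer's table; '?' marks don't-care bits.
  val table = TruthTable(
    Seq(
      BitPat("b00") -> BitPat("b001"),
      BitPat("b01") -> BitPat("b010"),
      BitPat("b1?") -> BitPat("b100")),
    BitPat("b???"))
  // The decoder is minimized into a PLA; unreachable inputs fall back to the default.
  io.sel := decoder(io.addr, table)
}

RouteComputer does the same thing at scale: one table row per (input-channel VC, possible flow), one output bit per (output channel, output VC), with the decoder result passed through Reverse and sliced bit-by-bit into resp.vc_sel. The generated module that follows is the flattened PLA produced from that table.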
module RouteComputer_15( // @[RouteComputer.scala:29:7] input [2:0] io_req_3_bits_src_virt_id, // @[RouteComputer.scala:40:14] input [2:0] io_req_3_bits_flow_vnet_id, // @[RouteComputer.scala:40:14] input [4:0] io_req_3_bits_flow_ingress_node, // @[RouteComputer.scala:40:14] input [1:0] io_req_3_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14] input [4:0] io_req_3_bits_flow_egress_node, // @[RouteComputer.scala:40:14] input [1:0] io_req_3_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14] input [2:0] io_req_2_bits_src_virt_id, // @[RouteComputer.scala:40:14] input [2:0] io_req_2_bits_flow_vnet_id, // @[RouteComputer.scala:40:14] input [4:0] io_req_2_bits_flow_ingress_node, // @[RouteComputer.scala:40:14] input [1:0] io_req_2_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14] input [4:0] io_req_2_bits_flow_egress_node, // @[RouteComputer.scala:40:14] input [1:0] io_req_2_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14] input [2:0] io_req_1_bits_src_virt_id, // @[RouteComputer.scala:40:14] input [2:0] io_req_1_bits_flow_vnet_id, // @[RouteComputer.scala:40:14] input [4:0] io_req_1_bits_flow_ingress_node, // @[RouteComputer.scala:40:14] input [1:0] io_req_1_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14] input [4:0] io_req_1_bits_flow_egress_node, // @[RouteComputer.scala:40:14] input [1:0] io_req_1_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14] input [2:0] io_req_0_bits_src_virt_id, // @[RouteComputer.scala:40:14] input [2:0] io_req_0_bits_flow_vnet_id, // @[RouteComputer.scala:40:14] input [4:0] io_req_0_bits_flow_ingress_node, // @[RouteComputer.scala:40:14] input [1:0] io_req_0_bits_flow_ingress_node_id, // @[RouteComputer.scala:40:14] input [4:0] io_req_0_bits_flow_egress_node, // @[RouteComputer.scala:40:14] input [1:0] io_req_0_bits_flow_egress_node_id, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_2_1, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_2_2, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_2_3, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_2_4, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_2_5, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_2_6, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_2_7, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_1_1, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_1_2, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_1_3, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_1_4, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_1_5, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_1_6, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_1_7, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_0_1, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_0_2, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_0_3, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_0_4, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_0_5, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_0_6, // @[RouteComputer.scala:40:14] output io_resp_3_vc_sel_0_7, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_3_0, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_3_1, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_3_2, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_3_3, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_3_4, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_3_5, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_3_6, // 
@[RouteComputer.scala:40:14] output io_resp_2_vc_sel_3_7, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_1_1, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_1_2, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_1_3, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_1_4, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_1_5, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_1_6, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_1_7, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_1, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_2, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_3, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_4, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_5, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_6, // @[RouteComputer.scala:40:14] output io_resp_2_vc_sel_0_7, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_3_4, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_3_5, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_3_6, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_3_7, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_2_4, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_2_5, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_2_6, // @[RouteComputer.scala:40:14] output io_resp_1_vc_sel_2_7, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_3_0, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_3_1, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_3_2, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_3_3, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_3_4, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_3_5, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_3_6, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_3_7, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_2_0, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_2_1, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_2_2, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_2_3, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_2_4, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_2_5, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_2_6, // @[RouteComputer.scala:40:14] output io_resp_0_vc_sel_2_7 // @[RouteComputer.scala:40:14] ); wire [16:0] decoded_invInputs = ~{io_req_0_bits_flow_vnet_id, io_req_0_bits_flow_ingress_node, io_req_0_bits_flow_ingress_node_id, io_req_0_bits_flow_egress_node, io_req_0_bits_flow_egress_node_id}; // @[pla.scala:78:21] wire [2:0] _decoded_andMatrixOutputs_T = {decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [2:0] _decoded_andMatrixOutputs_T_1 = {decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], decoded_invInputs[6]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [2:0] _decoded_andMatrixOutputs_T_3 = {io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], io_req_0_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_5 = {io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], io_req_0_bits_flow_egress_node[1], io_req_0_bits_flow_egress_node[2], decoded_invInputs[5], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], 
decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_7 = {io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], io_req_0_bits_flow_egress_node[1], decoded_invInputs[4], io_req_0_bits_flow_egress_node[3], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [2:0] _decoded_andMatrixOutputs_T_8 = {io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], io_req_0_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_10 = {io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], io_req_0_bits_flow_egress_node[1], io_req_0_bits_flow_egress_node[2], decoded_invInputs[5], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_12 = {io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], io_req_0_bits_flow_egress_node[1], decoded_invInputs[4], io_req_0_bits_flow_egress_node[3], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _decoded_andMatrixOutputs_T_13 = {decoded_invInputs[0], decoded_invInputs[1], io_req_0_bits_flow_egress_node[1], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], io_req_0_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_14 = {decoded_invInputs[0], decoded_invInputs[1], io_req_0_bits_flow_egress_node[2], decoded_invInputs[5], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], io_req_0_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _decoded_andMatrixOutputs_T_16 = {decoded_invInputs[0], decoded_invInputs[1], io_req_0_bits_flow_egress_node[3], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], io_req_0_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] decoded_invInputs_1 = 
~{io_req_1_bits_flow_vnet_id[1:0], io_req_1_bits_flow_ingress_node, io_req_1_bits_flow_ingress_node_id, io_req_1_bits_flow_egress_node, io_req_1_bits_flow_egress_node_id}; // @[pla.scala:78:21] wire [15:0] _decoded_andMatrixOutputs_T_24 = {decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[4], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[0], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_25 = {decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[4], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[0], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_26 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[2], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[0], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_27 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[1], io_req_1_bits_flow_egress_node[2], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[0], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _decoded_andMatrixOutputs_T_28 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[3], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[0], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_29 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[1], decoded_invInputs_1[4], io_req_1_bits_flow_egress_node[3], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[0], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_30 = {decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[4], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], 
decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[1], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_31 = {decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[4], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[1], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_32 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[2], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[1], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_33 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[1], io_req_1_bits_flow_egress_node[2], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[1], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _decoded_andMatrixOutputs_T_34 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[3], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[1], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_35 = {decoded_invInputs_1[0], io_req_1_bits_flow_egress_node[1], decoded_invInputs_1[4], io_req_1_bits_flow_egress_node[3], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[1], io_req_1_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] decoded_invInputs_2 = ~{io_req_2_bits_flow_vnet_id, io_req_2_bits_flow_ingress_node, io_req_2_bits_flow_ingress_node_id, io_req_2_bits_flow_egress_node, io_req_2_bits_flow_egress_node_id}; // @[pla.scala:78:21] wire [2:0] _decoded_andMatrixOutputs_T_37 = {decoded_invInputs_2[0], io_req_2_bits_flow_egress_node_id[1], io_req_2_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_39 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], 
decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_42 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], io_req_2_bits_flow_ingress_node[2], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_44 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], io_req_2_bits_flow_ingress_node[3], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [2:0] _decoded_andMatrixOutputs_T_45 = {decoded_invInputs_2[0], io_req_2_bits_flow_egress_node_id[1], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _decoded_andMatrixOutputs_T_46 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_47 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_48 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[3], io_req_2_bits_flow_egress_node[2], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_49 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[3], 
io_req_2_bits_flow_egress_node[3], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_51 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], io_req_2_bits_flow_ingress_node[2], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_52 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], io_req_2_bits_flow_ingress_node[2], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_53 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], io_req_2_bits_flow_ingress_node[3], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_54 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], io_req_2_bits_flow_ingress_node[3], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [4:0] _decoded_andMatrixOutputs_T_56 = {decoded_invInputs_2[0], io_req_2_bits_flow_vnet_id[1], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [4:0] _decoded_andMatrixOutputs_T_57 = {io_req_2_bits_flow_egress_node_id[0], io_req_2_bits_flow_vnet_id[1], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0], io_req_2_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [3:0] _decoded_andMatrixOutputs_T_58 = {decoded_invInputs_2[0], decoded_invInputs_2[1], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [2:0] _decoded_andMatrixOutputs_T_59 = 
{decoded_invInputs_2[0], io_req_2_bits_flow_egress_node_id[1], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [14:0] _decoded_andMatrixOutputs_T_60 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_61 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_62 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[3], io_req_2_bits_flow_egress_node[2], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [15:0] _decoded_andMatrixOutputs_T_63 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[3], io_req_2_bits_flow_egress_node[3], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_65 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], io_req_2_bits_flow_ingress_node[2], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_66 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], io_req_2_bits_flow_ingress_node[2], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // 
@[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_67 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], io_req_2_bits_flow_ingress_node[3], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_68 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[0], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], io_req_2_bits_flow_ingress_node[3], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [4:0] _decoded_andMatrixOutputs_T_69 = {io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_vnet_id[1], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [3:0] _decoded_andMatrixOutputs_T_71 = {decoded_invInputs_2[0], io_req_2_bits_flow_vnet_id[2], io_req_2_bits_src_virt_id[0], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [3:0] _decoded_andMatrixOutputs_T_72 = {decoded_invInputs_2[0], io_req_2_bits_flow_vnet_id[2], io_req_2_bits_src_virt_id[1], io_req_2_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] decoded_invInputs_3 = ~{io_req_3_bits_flow_vnet_id, io_req_3_bits_flow_ingress_node, io_req_3_bits_flow_ingress_node_id, io_req_3_bits_flow_egress_node, io_req_3_bits_flow_egress_node_id}; // @[pla.scala:78:21] wire [17:0] _decoded_andMatrixOutputs_T_74 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_77 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_79 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], 
decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_82 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_83 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_84 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_85 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_86 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_87 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], 
decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_88 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_89 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_90 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_91 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_92 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_93 = {decoded_invInputs_3[0], decoded_invInputs_3[3], decoded_invInputs_3[4], 
decoded_invInputs_3[5], decoded_invInputs_3[6], decoded_invInputs_3[7], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [9:0] _decoded_andMatrixOutputs_T_95 = {decoded_invInputs_3[0], io_req_3_bits_flow_ingress_node[1], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_96 = {decoded_invInputs_3[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], decoded_invInputs_3[7], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [9:0] _decoded_andMatrixOutputs_T_98 = {decoded_invInputs_3[0], io_req_3_bits_flow_ingress_node[1], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0], io_req_3_bits_src_virt_id[1]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_99 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_100 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_101 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire 
[17:0] _decoded_andMatrixOutputs_T_102 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_103 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_104 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_105 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_106 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_107 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // 
@[pla.scala:78:21, :90:45, :91:29, :98:53] wire [17:0] _decoded_andMatrixOutputs_T_108 = {io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[0], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_109 = {decoded_invInputs_3[0], decoded_invInputs_3[1], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], decoded_invInputs_3[7], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_110 = {decoded_invInputs_3[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], decoded_invInputs_3[7], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [9:0] _decoded_andMatrixOutputs_T_111 = {decoded_invInputs_3[0], decoded_invInputs_3[1], io_req_3_bits_flow_ingress_node[1], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_112 = {decoded_invInputs_3[0], decoded_invInputs_3[1], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], decoded_invInputs_3[7], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [16:0] _decoded_andMatrixOutputs_T_113 = {decoded_invInputs_3[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], decoded_invInputs_3[7], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [9:0] _decoded_andMatrixOutputs_T_114 = {decoded_invInputs_3[0], decoded_invInputs_3[1], io_req_3_bits_flow_ingress_node[1], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], 
io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [7:0] _decoded_andMatrixOutputs_T_116 = {decoded_invInputs_3[0], decoded_invInputs_3[10], decoded_invInputs_3[13], decoded_invInputs_3[14], decoded_invInputs_3[15], io_req_3_bits_flow_vnet_id[2], io_req_3_bits_src_virt_id[0], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] wire [7:0] _decoded_andMatrixOutputs_T_117 = {decoded_invInputs_3[0], decoded_invInputs_3[10], decoded_invInputs_3[13], decoded_invInputs_3[14], decoded_invInputs_3[15], io_req_3_bits_flow_vnet_id[2], io_req_3_bits_src_virt_id[1], io_req_3_bits_src_virt_id[2]}; // @[pla.scala:78:21, :90:45, :91:29, :98:53] assign io_resp_3_vc_sel_2_1 = |{&{io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}, &{io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_85, &_decoded_andMatrixOutputs_T_90, &_decoded_andMatrixOutputs_T_101, &_decoded_andMatrixOutputs_T_106}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_2_2 = |{&_decoded_andMatrixOutputs_T_85, &_decoded_andMatrixOutputs_T_90, &_decoded_andMatrixOutputs_T_101, &_decoded_andMatrixOutputs_T_106}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_2_3 = |{&_decoded_andMatrixOutputs_T_85, &_decoded_andMatrixOutputs_T_90, &{decoded_invInputs_3[0], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], decoded_invInputs_3[7], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0], io_req_3_bits_src_virt_id[1]}, &{decoded_invInputs_3[0], io_req_3_bits_flow_egress_node[1], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], decoded_invInputs_3[7], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], io_req_3_bits_flow_vnet_id[1], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0], io_req_3_bits_src_virt_id[1]}, &_decoded_andMatrixOutputs_T_101, &_decoded_andMatrixOutputs_T_106, &_decoded_andMatrixOutputs_T_110, &_decoded_andMatrixOutputs_T_113}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_2_4 = |{&_decoded_andMatrixOutputs_T_85, &_decoded_andMatrixOutputs_T_90, &_decoded_andMatrixOutputs_T_101, &_decoded_andMatrixOutputs_T_106, 
&_decoded_andMatrixOutputs_T_110, &_decoded_andMatrixOutputs_T_113, &{decoded_invInputs_3[0], decoded_invInputs_3[1], decoded_invInputs_3[10], decoded_invInputs_3[13], decoded_invInputs_3[14], decoded_invInputs_3[15], io_req_3_bits_flow_vnet_id[2], io_req_3_bits_src_virt_id[2]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_2_5 = |{&_decoded_andMatrixOutputs_T_85, &_decoded_andMatrixOutputs_T_90, &_decoded_andMatrixOutputs_T_101, &_decoded_andMatrixOutputs_T_106, &_decoded_andMatrixOutputs_T_110, &_decoded_andMatrixOutputs_T_113, &_decoded_andMatrixOutputs_T_116, &_decoded_andMatrixOutputs_T_117}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_2_6 = |{&_decoded_andMatrixOutputs_T_85, &_decoded_andMatrixOutputs_T_90, &_decoded_andMatrixOutputs_T_101, &_decoded_andMatrixOutputs_T_106, &_decoded_andMatrixOutputs_T_110, &_decoded_andMatrixOutputs_T_113, &_decoded_andMatrixOutputs_T_116, &_decoded_andMatrixOutputs_T_117}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_2_7 = |{&_decoded_andMatrixOutputs_T_85, &_decoded_andMatrixOutputs_T_90, &_decoded_andMatrixOutputs_T_101, &_decoded_andMatrixOutputs_T_106, &_decoded_andMatrixOutputs_T_110, &_decoded_andMatrixOutputs_T_113, &_decoded_andMatrixOutputs_T_116, &_decoded_andMatrixOutputs_T_117}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_1_1 = |{&{io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}, &{io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], io_req_3_bits_flow_ingress_node[2], decoded_invInputs_3[12], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}, &{io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], io_req_3_bits_flow_ingress_node[0], decoded_invInputs_3[10], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}, &{io_req_3_bits_flow_egress_node_id[0], decoded_invInputs_3[1], decoded_invInputs_3[2], decoded_invInputs_3[3], decoded_invInputs_3[4], decoded_invInputs_3[5], decoded_invInputs_3[6], io_req_3_bits_flow_ingress_node_id[0], decoded_invInputs_3[8], decoded_invInputs_3[9], io_req_3_bits_flow_ingress_node[1], decoded_invInputs_3[11], io_req_3_bits_flow_ingress_node[3], decoded_invInputs_3[13], io_req_3_bits_flow_vnet_id[0], decoded_invInputs_3[15], decoded_invInputs_3[16], io_req_3_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_83, &_decoded_andMatrixOutputs_T_86, &_decoded_andMatrixOutputs_T_88, 
&_decoded_andMatrixOutputs_T_91, &_decoded_andMatrixOutputs_T_99, &_decoded_andMatrixOutputs_T_102, &_decoded_andMatrixOutputs_T_104, &_decoded_andMatrixOutputs_T_107}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_1_2 = |{&_decoded_andMatrixOutputs_T_83, &_decoded_andMatrixOutputs_T_86, &_decoded_andMatrixOutputs_T_88, &_decoded_andMatrixOutputs_T_91, &_decoded_andMatrixOutputs_T_99, &_decoded_andMatrixOutputs_T_102, &_decoded_andMatrixOutputs_T_104, &_decoded_andMatrixOutputs_T_107}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_1_3 = |{&_decoded_andMatrixOutputs_T_83, &_decoded_andMatrixOutputs_T_86, &_decoded_andMatrixOutputs_T_88, &_decoded_andMatrixOutputs_T_91, &_decoded_andMatrixOutputs_T_99, &_decoded_andMatrixOutputs_T_102, &_decoded_andMatrixOutputs_T_104, &_decoded_andMatrixOutputs_T_107}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_1_4 = |{&_decoded_andMatrixOutputs_T_83, &_decoded_andMatrixOutputs_T_86, &_decoded_andMatrixOutputs_T_88, &_decoded_andMatrixOutputs_T_91, &_decoded_andMatrixOutputs_T_99, &_decoded_andMatrixOutputs_T_102, &_decoded_andMatrixOutputs_T_104, &_decoded_andMatrixOutputs_T_107}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_1_5 = |{&_decoded_andMatrixOutputs_T_83, &_decoded_andMatrixOutputs_T_86, &_decoded_andMatrixOutputs_T_88, &_decoded_andMatrixOutputs_T_91, &_decoded_andMatrixOutputs_T_99, &_decoded_andMatrixOutputs_T_102, &_decoded_andMatrixOutputs_T_104, &_decoded_andMatrixOutputs_T_107}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_1_6 = |{&_decoded_andMatrixOutputs_T_83, &_decoded_andMatrixOutputs_T_86, &_decoded_andMatrixOutputs_T_88, &_decoded_andMatrixOutputs_T_91, &_decoded_andMatrixOutputs_T_99, &_decoded_andMatrixOutputs_T_102, &_decoded_andMatrixOutputs_T_104, &_decoded_andMatrixOutputs_T_107}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_1_7 = |{&_decoded_andMatrixOutputs_T_83, &_decoded_andMatrixOutputs_T_86, &_decoded_andMatrixOutputs_T_88, &_decoded_andMatrixOutputs_T_91, &_decoded_andMatrixOutputs_T_99, &_decoded_andMatrixOutputs_T_102, &_decoded_andMatrixOutputs_T_104, &_decoded_andMatrixOutputs_T_107}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_0_1 = |{&_decoded_andMatrixOutputs_T_74, &_decoded_andMatrixOutputs_T_77, &_decoded_andMatrixOutputs_T_79, &_decoded_andMatrixOutputs_T_82, &_decoded_andMatrixOutputs_T_84, &_decoded_andMatrixOutputs_T_87, &_decoded_andMatrixOutputs_T_89, &_decoded_andMatrixOutputs_T_92, &_decoded_andMatrixOutputs_T_100, &_decoded_andMatrixOutputs_T_103, &_decoded_andMatrixOutputs_T_105, &_decoded_andMatrixOutputs_T_108}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_0_2 = |{&_decoded_andMatrixOutputs_T_74, &_decoded_andMatrixOutputs_T_77, &_decoded_andMatrixOutputs_T_79, &_decoded_andMatrixOutputs_T_82, &_decoded_andMatrixOutputs_T_84, &_decoded_andMatrixOutputs_T_87, &_decoded_andMatrixOutputs_T_89, &_decoded_andMatrixOutputs_T_92, &_decoded_andMatrixOutputs_T_100, &_decoded_andMatrixOutputs_T_103, &_decoded_andMatrixOutputs_T_105, &_decoded_andMatrixOutputs_T_108}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_0_3 = |{&_decoded_andMatrixOutputs_T_74, &_decoded_andMatrixOutputs_T_77, &_decoded_andMatrixOutputs_T_79, &_decoded_andMatrixOutputs_T_82, &_decoded_andMatrixOutputs_T_84, &_decoded_andMatrixOutputs_T_87, &_decoded_andMatrixOutputs_T_89, &_decoded_andMatrixOutputs_T_92, 
&_decoded_andMatrixOutputs_T_93, &_decoded_andMatrixOutputs_T_95, &_decoded_andMatrixOutputs_T_96, &_decoded_andMatrixOutputs_T_98, &_decoded_andMatrixOutputs_T_100, &_decoded_andMatrixOutputs_T_103, &_decoded_andMatrixOutputs_T_105, &_decoded_andMatrixOutputs_T_108, &_decoded_andMatrixOutputs_T_109, &_decoded_andMatrixOutputs_T_111, &_decoded_andMatrixOutputs_T_112, &_decoded_andMatrixOutputs_T_114}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_0_4 = |{&_decoded_andMatrixOutputs_T_74, &_decoded_andMatrixOutputs_T_77, &_decoded_andMatrixOutputs_T_79, &_decoded_andMatrixOutputs_T_82, &_decoded_andMatrixOutputs_T_84, &_decoded_andMatrixOutputs_T_87, &_decoded_andMatrixOutputs_T_89, &_decoded_andMatrixOutputs_T_92, &_decoded_andMatrixOutputs_T_93, &_decoded_andMatrixOutputs_T_95, &_decoded_andMatrixOutputs_T_96, &_decoded_andMatrixOutputs_T_98, &_decoded_andMatrixOutputs_T_100, &_decoded_andMatrixOutputs_T_103, &_decoded_andMatrixOutputs_T_105, &_decoded_andMatrixOutputs_T_108, &_decoded_andMatrixOutputs_T_109, &_decoded_andMatrixOutputs_T_111, &_decoded_andMatrixOutputs_T_112, &_decoded_andMatrixOutputs_T_114}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_0_5 = |{&_decoded_andMatrixOutputs_T_74, &_decoded_andMatrixOutputs_T_77, &_decoded_andMatrixOutputs_T_79, &_decoded_andMatrixOutputs_T_82, &_decoded_andMatrixOutputs_T_84, &_decoded_andMatrixOutputs_T_87, &_decoded_andMatrixOutputs_T_89, &_decoded_andMatrixOutputs_T_92, &_decoded_andMatrixOutputs_T_93, &_decoded_andMatrixOutputs_T_95, &_decoded_andMatrixOutputs_T_96, &_decoded_andMatrixOutputs_T_98, &_decoded_andMatrixOutputs_T_100, &_decoded_andMatrixOutputs_T_103, &_decoded_andMatrixOutputs_T_105, &_decoded_andMatrixOutputs_T_108, &_decoded_andMatrixOutputs_T_109, &_decoded_andMatrixOutputs_T_111, &_decoded_andMatrixOutputs_T_112, &_decoded_andMatrixOutputs_T_114}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_0_6 = |{&_decoded_andMatrixOutputs_T_74, &_decoded_andMatrixOutputs_T_77, &_decoded_andMatrixOutputs_T_79, &_decoded_andMatrixOutputs_T_82, &_decoded_andMatrixOutputs_T_84, &_decoded_andMatrixOutputs_T_87, &_decoded_andMatrixOutputs_T_89, &_decoded_andMatrixOutputs_T_92, &_decoded_andMatrixOutputs_T_93, &_decoded_andMatrixOutputs_T_95, &_decoded_andMatrixOutputs_T_96, &_decoded_andMatrixOutputs_T_98, &_decoded_andMatrixOutputs_T_100, &_decoded_andMatrixOutputs_T_103, &_decoded_andMatrixOutputs_T_105, &_decoded_andMatrixOutputs_T_108, &_decoded_andMatrixOutputs_T_109, &_decoded_andMatrixOutputs_T_111, &_decoded_andMatrixOutputs_T_112, &_decoded_andMatrixOutputs_T_114}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_3_vc_sel_0_7 = |{&_decoded_andMatrixOutputs_T_74, &_decoded_andMatrixOutputs_T_77, &_decoded_andMatrixOutputs_T_79, &_decoded_andMatrixOutputs_T_82, &_decoded_andMatrixOutputs_T_84, &_decoded_andMatrixOutputs_T_87, &_decoded_andMatrixOutputs_T_89, &_decoded_andMatrixOutputs_T_92, &_decoded_andMatrixOutputs_T_93, &_decoded_andMatrixOutputs_T_95, &_decoded_andMatrixOutputs_T_96, &_decoded_andMatrixOutputs_T_98, &_decoded_andMatrixOutputs_T_100, &_decoded_andMatrixOutputs_T_103, &_decoded_andMatrixOutputs_T_105, &_decoded_andMatrixOutputs_T_108, &_decoded_andMatrixOutputs_T_109, &_decoded_andMatrixOutputs_T_111, &_decoded_andMatrixOutputs_T_112, &_decoded_andMatrixOutputs_T_114}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_3_0 = &{decoded_invInputs_2[0], io_req_2_bits_flow_egress_node_id[1], io_req_2_bits_flow_egress_node[0]}; // 
@[pla.scala:78:21, :90:45, :91:29, :98:{53,70}] assign io_resp_2_vc_sel_3_1 = |{&_decoded_andMatrixOutputs_T_37, &{io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[2], io_req_2_bits_flow_egress_node[3], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_45, &{io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[2], io_req_2_bits_flow_egress_node[3], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}, &_decoded_andMatrixOutputs_T_59, &{io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_egress_node[2], io_req_2_bits_flow_egress_node[3], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[2]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_3_2 = |{&_decoded_andMatrixOutputs_T_37, &_decoded_andMatrixOutputs_T_45, &_decoded_andMatrixOutputs_T_48, &_decoded_andMatrixOutputs_T_49, &{io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], io_req_2_bits_flow_vnet_id[1], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[1]}, &_decoded_andMatrixOutputs_T_59, &_decoded_andMatrixOutputs_T_62, &_decoded_andMatrixOutputs_T_63, &_decoded_andMatrixOutputs_T_69}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_3_3 = |{&_decoded_andMatrixOutputs_T_37, &_decoded_andMatrixOutputs_T_45, &_decoded_andMatrixOutputs_T_48, &_decoded_andMatrixOutputs_T_49, &_decoded_andMatrixOutputs_T_57, &_decoded_andMatrixOutputs_T_59, &_decoded_andMatrixOutputs_T_62, &_decoded_andMatrixOutputs_T_63, &_decoded_andMatrixOutputs_T_69}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_3_4 = |{&_decoded_andMatrixOutputs_T_37, &_decoded_andMatrixOutputs_T_45, &_decoded_andMatrixOutputs_T_48, &_decoded_andMatrixOutputs_T_49, &_decoded_andMatrixOutputs_T_57, &_decoded_andMatrixOutputs_T_59, &_decoded_andMatrixOutputs_T_62, &_decoded_andMatrixOutputs_T_63, &_decoded_andMatrixOutputs_T_69, &{decoded_invInputs_2[0], decoded_invInputs_2[1], io_req_2_bits_flow_vnet_id[2], io_req_2_bits_src_virt_id[2]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_3_5 = |{&_decoded_andMatrixOutputs_T_37, &_decoded_andMatrixOutputs_T_45, &_decoded_andMatrixOutputs_T_48, &_decoded_andMatrixOutputs_T_49, &_decoded_andMatrixOutputs_T_57, &_decoded_andMatrixOutputs_T_59, &_decoded_andMatrixOutputs_T_62, &_decoded_andMatrixOutputs_T_63, &_decoded_andMatrixOutputs_T_69, &_decoded_andMatrixOutputs_T_71, &_decoded_andMatrixOutputs_T_72}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_3_6 = 
|{&_decoded_andMatrixOutputs_T_37, &_decoded_andMatrixOutputs_T_45, &_decoded_andMatrixOutputs_T_48, &_decoded_andMatrixOutputs_T_49, &_decoded_andMatrixOutputs_T_57, &_decoded_andMatrixOutputs_T_59, &_decoded_andMatrixOutputs_T_62, &_decoded_andMatrixOutputs_T_63, &_decoded_andMatrixOutputs_T_69, &_decoded_andMatrixOutputs_T_71, &_decoded_andMatrixOutputs_T_72}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_3_7 = |{&_decoded_andMatrixOutputs_T_37, &_decoded_andMatrixOutputs_T_45, &_decoded_andMatrixOutputs_T_48, &_decoded_andMatrixOutputs_T_49, &_decoded_andMatrixOutputs_T_57, &_decoded_andMatrixOutputs_T_59, &_decoded_andMatrixOutputs_T_62, &_decoded_andMatrixOutputs_T_63, &_decoded_andMatrixOutputs_T_69, &_decoded_andMatrixOutputs_T_71, &_decoded_andMatrixOutputs_T_72}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_1_1 = |{&{io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], io_req_2_bits_flow_ingress_node[0], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0]}, &{io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], io_req_2_bits_flow_ingress_node[2], decoded_invInputs_2[12], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0]}, &{io_req_2_bits_flow_egress_node_id[0], decoded_invInputs_2[1], decoded_invInputs_2[2], decoded_invInputs_2[3], decoded_invInputs_2[4], decoded_invInputs_2[5], decoded_invInputs_2[6], io_req_2_bits_flow_ingress_node_id[0], decoded_invInputs_2[8], decoded_invInputs_2[9], io_req_2_bits_flow_ingress_node[1], decoded_invInputs_2[11], io_req_2_bits_flow_ingress_node[3], decoded_invInputs_2[13], io_req_2_bits_flow_vnet_id[0], decoded_invInputs_2[15], decoded_invInputs_2[16], io_req_2_bits_src_virt_id[0]}, &_decoded_andMatrixOutputs_T_46, &_decoded_andMatrixOutputs_T_51, &_decoded_andMatrixOutputs_T_53, &_decoded_andMatrixOutputs_T_60, &_decoded_andMatrixOutputs_T_65, &_decoded_andMatrixOutputs_T_67}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_1_2 = |{&_decoded_andMatrixOutputs_T_46, &_decoded_andMatrixOutputs_T_51, &_decoded_andMatrixOutputs_T_53, &_decoded_andMatrixOutputs_T_60, &_decoded_andMatrixOutputs_T_65, &_decoded_andMatrixOutputs_T_67}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_1_3 = |{&_decoded_andMatrixOutputs_T_46, &_decoded_andMatrixOutputs_T_51, &_decoded_andMatrixOutputs_T_53, &_decoded_andMatrixOutputs_T_60, &_decoded_andMatrixOutputs_T_65, &_decoded_andMatrixOutputs_T_67}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_1_4 = |{&_decoded_andMatrixOutputs_T_46, &_decoded_andMatrixOutputs_T_51, &_decoded_andMatrixOutputs_T_53, &_decoded_andMatrixOutputs_T_60, &_decoded_andMatrixOutputs_T_65, &_decoded_andMatrixOutputs_T_67}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_1_5 = |{&_decoded_andMatrixOutputs_T_46, &_decoded_andMatrixOutputs_T_51, &_decoded_andMatrixOutputs_T_53, 
&_decoded_andMatrixOutputs_T_60, &_decoded_andMatrixOutputs_T_65, &_decoded_andMatrixOutputs_T_67}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_1_6 = |{&_decoded_andMatrixOutputs_T_46, &_decoded_andMatrixOutputs_T_51, &_decoded_andMatrixOutputs_T_53, &_decoded_andMatrixOutputs_T_60, &_decoded_andMatrixOutputs_T_65, &_decoded_andMatrixOutputs_T_67}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_1_7 = |{&_decoded_andMatrixOutputs_T_46, &_decoded_andMatrixOutputs_T_51, &_decoded_andMatrixOutputs_T_53, &_decoded_andMatrixOutputs_T_60, &_decoded_andMatrixOutputs_T_65, &_decoded_andMatrixOutputs_T_67}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_0_1 = |{&_decoded_andMatrixOutputs_T_39, &_decoded_andMatrixOutputs_T_42, &_decoded_andMatrixOutputs_T_44, &_decoded_andMatrixOutputs_T_47, &_decoded_andMatrixOutputs_T_52, &_decoded_andMatrixOutputs_T_54, &_decoded_andMatrixOutputs_T_61, &_decoded_andMatrixOutputs_T_66, &_decoded_andMatrixOutputs_T_68}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_0_2 = |{&_decoded_andMatrixOutputs_T_39, &_decoded_andMatrixOutputs_T_42, &_decoded_andMatrixOutputs_T_44, &_decoded_andMatrixOutputs_T_47, &_decoded_andMatrixOutputs_T_52, &_decoded_andMatrixOutputs_T_54, &_decoded_andMatrixOutputs_T_61, &_decoded_andMatrixOutputs_T_66, &_decoded_andMatrixOutputs_T_68}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_0_3 = |{&_decoded_andMatrixOutputs_T_39, &_decoded_andMatrixOutputs_T_42, &_decoded_andMatrixOutputs_T_44, &_decoded_andMatrixOutputs_T_47, &_decoded_andMatrixOutputs_T_52, &_decoded_andMatrixOutputs_T_54, &_decoded_andMatrixOutputs_T_56, &_decoded_andMatrixOutputs_T_58, &_decoded_andMatrixOutputs_T_61, &_decoded_andMatrixOutputs_T_66, &_decoded_andMatrixOutputs_T_68}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_0_4 = |{&_decoded_andMatrixOutputs_T_39, &_decoded_andMatrixOutputs_T_42, &_decoded_andMatrixOutputs_T_44, &_decoded_andMatrixOutputs_T_47, &_decoded_andMatrixOutputs_T_52, &_decoded_andMatrixOutputs_T_54, &_decoded_andMatrixOutputs_T_56, &_decoded_andMatrixOutputs_T_58, &_decoded_andMatrixOutputs_T_61, &_decoded_andMatrixOutputs_T_66, &_decoded_andMatrixOutputs_T_68}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_0_5 = |{&_decoded_andMatrixOutputs_T_39, &_decoded_andMatrixOutputs_T_42, &_decoded_andMatrixOutputs_T_44, &_decoded_andMatrixOutputs_T_47, &_decoded_andMatrixOutputs_T_52, &_decoded_andMatrixOutputs_T_54, &_decoded_andMatrixOutputs_T_56, &_decoded_andMatrixOutputs_T_58, &_decoded_andMatrixOutputs_T_61, &_decoded_andMatrixOutputs_T_66, &_decoded_andMatrixOutputs_T_68}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_0_6 = |{&_decoded_andMatrixOutputs_T_39, &_decoded_andMatrixOutputs_T_42, &_decoded_andMatrixOutputs_T_44, &_decoded_andMatrixOutputs_T_47, &_decoded_andMatrixOutputs_T_52, &_decoded_andMatrixOutputs_T_54, &_decoded_andMatrixOutputs_T_56, &_decoded_andMatrixOutputs_T_58, &_decoded_andMatrixOutputs_T_61, &_decoded_andMatrixOutputs_T_66, &_decoded_andMatrixOutputs_T_68}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_2_vc_sel_0_7 = |{&_decoded_andMatrixOutputs_T_39, &_decoded_andMatrixOutputs_T_42, &_decoded_andMatrixOutputs_T_44, &_decoded_andMatrixOutputs_T_47, &_decoded_andMatrixOutputs_T_52, &_decoded_andMatrixOutputs_T_54, &_decoded_andMatrixOutputs_T_56, &_decoded_andMatrixOutputs_T_58, &_decoded_andMatrixOutputs_T_61, &_decoded_andMatrixOutputs_T_66, 
&_decoded_andMatrixOutputs_T_68}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_1_vc_sel_3_4 = |{&{decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[3], io_req_1_bits_flow_egress_node[2], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[2]}, &{decoded_invInputs_1[0], decoded_invInputs_1[1], decoded_invInputs_1[3], decoded_invInputs_1[4], io_req_1_bits_flow_egress_node[3], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[2]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_1_vc_sel_3_5 = |{&_decoded_andMatrixOutputs_T_26, &_decoded_andMatrixOutputs_T_28, &_decoded_andMatrixOutputs_T_32, &_decoded_andMatrixOutputs_T_34}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_1_vc_sel_3_6 = |{&_decoded_andMatrixOutputs_T_26, &_decoded_andMatrixOutputs_T_28, &_decoded_andMatrixOutputs_T_32, &_decoded_andMatrixOutputs_T_34}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_1_vc_sel_3_7 = |{&_decoded_andMatrixOutputs_T_26, &_decoded_andMatrixOutputs_T_28, &_decoded_andMatrixOutputs_T_32, &_decoded_andMatrixOutputs_T_34}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_1_vc_sel_2_4 = |{&{decoded_invInputs_1[0], decoded_invInputs_1[1], io_req_1_bits_flow_egress_node[0], decoded_invInputs_1[4], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[2]}, &{decoded_invInputs_1[0], decoded_invInputs_1[1], io_req_1_bits_flow_egress_node[0], decoded_invInputs_1[4], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[2]}, &{decoded_invInputs_1[0], decoded_invInputs_1[1], io_req_1_bits_flow_egress_node[1], io_req_1_bits_flow_egress_node[2], decoded_invInputs_1[5], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[2]}, &{decoded_invInputs_1[0], decoded_invInputs_1[1], io_req_1_bits_flow_egress_node[1], decoded_invInputs_1[4], io_req_1_bits_flow_egress_node[3], decoded_invInputs_1[6], decoded_invInputs_1[7], decoded_invInputs_1[8], decoded_invInputs_1[9], decoded_invInputs_1[10], decoded_invInputs_1[11], decoded_invInputs_1[12], decoded_invInputs_1[13], decoded_invInputs_1[14], decoded_invInputs_1[15], io_req_1_bits_flow_vnet_id[2], io_req_1_bits_src_virt_id[2]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_1_vc_sel_2_5 = 
|{&_decoded_andMatrixOutputs_T_24, &_decoded_andMatrixOutputs_T_25, &_decoded_andMatrixOutputs_T_27, &_decoded_andMatrixOutputs_T_29, &_decoded_andMatrixOutputs_T_30, &_decoded_andMatrixOutputs_T_31, &_decoded_andMatrixOutputs_T_33, &_decoded_andMatrixOutputs_T_35}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_1_vc_sel_2_6 = |{&_decoded_andMatrixOutputs_T_24, &_decoded_andMatrixOutputs_T_25, &_decoded_andMatrixOutputs_T_27, &_decoded_andMatrixOutputs_T_29, &_decoded_andMatrixOutputs_T_30, &_decoded_andMatrixOutputs_T_31, &_decoded_andMatrixOutputs_T_33, &_decoded_andMatrixOutputs_T_35}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_1_vc_sel_2_7 = |{&_decoded_andMatrixOutputs_T_24, &_decoded_andMatrixOutputs_T_25, &_decoded_andMatrixOutputs_T_27, &_decoded_andMatrixOutputs_T_29, &_decoded_andMatrixOutputs_T_30, &_decoded_andMatrixOutputs_T_31, &_decoded_andMatrixOutputs_T_33, &_decoded_andMatrixOutputs_T_35}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_3_0 = &{decoded_invInputs[0], io_req_0_bits_flow_egress_node_id[1], io_req_0_bits_flow_egress_node[0]}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}] assign io_resp_0_vc_sel_3_1 = &_decoded_andMatrixOutputs_T_1; // @[pla.scala:98:{53,70}] assign io_resp_0_vc_sel_3_2 = |{&_decoded_andMatrixOutputs_T_1, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[3], io_req_0_bits_flow_egress_node[2], decoded_invInputs[5], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[1]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[3], decoded_invInputs[4], io_req_0_bits_flow_egress_node[3], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[1]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[3], io_req_0_bits_flow_egress_node[2], decoded_invInputs[5], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[2]}, &{io_req_0_bits_flow_egress_node_id[0], decoded_invInputs[1], decoded_invInputs[3], decoded_invInputs[4], io_req_0_bits_flow_egress_node[3], decoded_invInputs[6], io_req_0_bits_flow_ingress_node_id[0], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], io_req_0_bits_flow_vnet_id[1], decoded_invInputs[16], io_req_0_bits_src_virt_id[2]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_3_3 = |{&_decoded_andMatrixOutputs_T_1, &_decoded_andMatrixOutputs_T_3, &_decoded_andMatrixOutputs_T_8}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_3_4 = |{&_decoded_andMatrixOutputs_T_1, &_decoded_andMatrixOutputs_T_3, &_decoded_andMatrixOutputs_T_8, &{decoded_invInputs[0], 
decoded_invInputs[1], decoded_invInputs[3], io_req_0_bits_flow_egress_node[2], decoded_invInputs[5], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], io_req_0_bits_src_virt_id[2]}, &{decoded_invInputs[0], decoded_invInputs[1], decoded_invInputs[3], decoded_invInputs[4], io_req_0_bits_flow_egress_node[3], decoded_invInputs[6], decoded_invInputs[7], decoded_invInputs[8], io_req_0_bits_flow_ingress_node[0], decoded_invInputs[10], decoded_invInputs[11], decoded_invInputs[12], decoded_invInputs[13], decoded_invInputs[14], decoded_invInputs[15], io_req_0_bits_flow_vnet_id[2], io_req_0_bits_src_virt_id[2]}}; // @[pla.scala:78:21, :90:45, :91:29, :98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_3_5 = |{&_decoded_andMatrixOutputs_T_1, &_decoded_andMatrixOutputs_T_3, &_decoded_andMatrixOutputs_T_8, &_decoded_andMatrixOutputs_T_14, &_decoded_andMatrixOutputs_T_16}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_3_6 = |{&_decoded_andMatrixOutputs_T_1, &_decoded_andMatrixOutputs_T_3, &_decoded_andMatrixOutputs_T_8, &_decoded_andMatrixOutputs_T_14, &_decoded_andMatrixOutputs_T_16}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_3_7 = |{&_decoded_andMatrixOutputs_T_1, &_decoded_andMatrixOutputs_T_3, &_decoded_andMatrixOutputs_T_8, &_decoded_andMatrixOutputs_T_14, &_decoded_andMatrixOutputs_T_16}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_2_0 = &_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}] assign io_resp_0_vc_sel_2_1 = &_decoded_andMatrixOutputs_T; // @[pla.scala:98:{53,70}] assign io_resp_0_vc_sel_2_2 = |{&_decoded_andMatrixOutputs_T, &_decoded_andMatrixOutputs_T_5, &_decoded_andMatrixOutputs_T_7, &_decoded_andMatrixOutputs_T_10, &_decoded_andMatrixOutputs_T_12}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_2_3 = |{&_decoded_andMatrixOutputs_T, &_decoded_andMatrixOutputs_T_5, &_decoded_andMatrixOutputs_T_7, &_decoded_andMatrixOutputs_T_10, &_decoded_andMatrixOutputs_T_12}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_2_4 = |{&_decoded_andMatrixOutputs_T, &_decoded_andMatrixOutputs_T_5, &_decoded_andMatrixOutputs_T_7, &_decoded_andMatrixOutputs_T_10, &_decoded_andMatrixOutputs_T_12, &_decoded_andMatrixOutputs_T_13}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_2_5 = |{&_decoded_andMatrixOutputs_T, &_decoded_andMatrixOutputs_T_5, &_decoded_andMatrixOutputs_T_7, &_decoded_andMatrixOutputs_T_10, &_decoded_andMatrixOutputs_T_12, &_decoded_andMatrixOutputs_T_13}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_2_6 = |{&_decoded_andMatrixOutputs_T, &_decoded_andMatrixOutputs_T_5, &_decoded_andMatrixOutputs_T_7, &_decoded_andMatrixOutputs_T_10, &_decoded_andMatrixOutputs_T_12, &_decoded_andMatrixOutputs_T_13}; // @[pla.scala:98:{53,70}, :114:{19,36}] assign io_resp_0_vc_sel_2_7 = |{&_decoded_andMatrixOutputs_T, &_decoded_andMatrixOutputs_T_5, &_decoded_andMatrixOutputs_T_7, &_decoded_andMatrixOutputs_T_10, &_decoded_andMatrixOutputs_T_12, &_decoded_andMatrixOutputs_T_13}; // @[pla.scala:98:{53,70}, :114:{19,36}] endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.tilelink

import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg

case class TLMonitorArgs(edge: TLEdge)

abstract class TLMonitorBase(args: TLMonitorArgs) extends Module {
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })

  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}

object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
    if (enable) {
      EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
    } else { node }
  }
}

class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) {
  require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))

  val cover_prop_class = PropertyClass.Default

  //Like assert but can flip to being an assumption for formal verification
  def monAssert(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) }
    else { Property(monitorDir, cond, message, PropertyClass.Default) }

  def assume(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) }
    else { Property(monitorDir.flip, cond, message, PropertyClass.Default) }

  def extra = {
    args.edge.sourceInfo match {
      case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
      case _ => ""
    }
  }

  def visible(address: UInt, source: UInt, edge: TLEdge) =
    edge.client.clients.map { c =>
      !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _)
    }.reduce(_ && _)

  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"

    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)

    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")

    //The monitor doesn't check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
    when (bundle.opcode === TLMessages.AcquireBlock) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.AcquirePerm) {
      monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra)
      monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra)
      monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra)
      monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra)
      monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra)
      monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra)
      monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra)
      monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra)
      monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra)
      monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra)
      monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra)
    }
  }

  def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = {
    monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra)
    monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility")

    // Reuse these subexpressions to save some firrtl lines
    val address_ok = edge.manager.containsSafe(edge.address(bundle))
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)
    val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source

    when (bundle.opcode === TLMessages.Probe) {
      assume (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra)
      assume (address_ok, "'B' channel Probe carries unmanaged address" + extra)
      assume (legal_source, "'B' channel Probe carries source that is not first source" + extra)
      assume (is_aligned, "'B' channel Probe address not aligned to size" + extra)
      assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra)
      assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra)
      assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.Get) {
      monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.PutFullData) {
      monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra)
      monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.PutPartialData) {
      monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra)
      monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra)
      monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.ArithmeticData) {
      monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra)
      monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra)
      monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.LogicalData) {
      monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra)
      monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra)
      monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra)
    }
    when (bundle.opcode === TLMessages.Hint) {
      monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra)
      monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra)
      monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra)
      monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra)
      monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra)
      monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra)
    }
  }

  def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = {
    monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra)

    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val address_ok = edge.manager.containsSafe(edge.address(bundle))

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility")

    when (bundle.opcode === TLMessages.ProbeAck) {
      monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.ProbeAckData) {
      monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra)
    }
    when (bundle.opcode === TLMessages.Release) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel Release carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra)
      monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.ReleaseData) {
      monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra)
      monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra)
      monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra)
      monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra)
      monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra)
      monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAck) {
      monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra)
    }
    when (bundle.opcode === TLMessages.AccessAckData) {
      monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra)
    }
    when (bundle.opcode === TLMessages.HintAck) {
      monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra)
      monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra)
      monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra)
      monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra)
      monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra)
    }
  }

  def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = {
    assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra)

    val source_ok = edge.client.contains(bundle.source)
    val sink_ok = bundle.sink < edge.manager.endSinkId.U
    val deny_put_ok = edge.manager.mayDenyPut.B
    val deny_get_ok = edge.manager.mayDenyGet.B

    when (bundle.opcode === TLMessages.ReleaseAck) {
      assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra)
      assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra)
      assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra)
      assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra)
      assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra)
    }
    when (bundle.opcode === TLMessages.Grant) {
      assume (source_ok, "'D' channel Grant carries invalid source ID" + extra)
      assume (sink_ok,
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
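// Illustrative note (not part of the upstream source): the registers updated here act as per-source
// scoreboards. If, say, an A request with source = 2 fires this cycle, a_set = UIntToOH(2) = 0b100
// sets bit 2 of `inflight`, and that bit stays set until the matching D response fires and d_clr
// clears it. The opcode/size shadow registers below are maintained the same way, one slot per
// source ID, each slot a_opcode_bus_size / a_size_bus_size bits wide.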
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple 
comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
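// Worked example (illustrative values, not from the original comments): for AddressSet(0x200, 0xff),
// contains(0x2a4) holds because ((0x2a4 ^ 0x200) & ~0xff) == 0, while contains(0x304) fails because
// bit 8 differs from the base. A negative mask (e.g. -1) ignores all high bits and matches every address.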
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
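// Illustrative note: if every manager on this edge is read-only (no Put/atomic support), aDataYes below
// is statically false, so hasData() for channel A constant-folds to false.B instead of decoding the
// opcode at runtime.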
        val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)  // opcode === TLMessages.Release ||
                                                       // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant ||
                                                       // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release &&
                                                        // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData ||
                                        // opcode === TLMessages.PutPartialData ||
                                        // opcode === TLMessages.ArithmeticData ||
                                        // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData ||
                                        // opcode === TLMessages.PutPartialData ||
                                        // opcode === TLMessages.ArithmeticData ||
                                        // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)  // opcode === TLMessages.AccessAckData ||
                                        // opcode === TLMessages.ProbeAckData ||
                                        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)  // opcode === TLMessages.AccessAckData ||
                                        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
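The firstlastHelper shown earlier in this file is the core of all of the first/last/done/count helpers: a single down-counter tracks how many beats of the current message remain. Below is a minimal standalone sketch of that counter scheme; the module and port names are invented for illustration, and the real helper derives beats1 from the TileLink size field via numBeats1.

// Minimal sketch of the firstlastHelper counter scheme (illustrative names only).
import chisel3._
import chisel3.util._

class BeatTracker(maxBeats: Int) extends Module {
  val io = IO(new Bundle {
    val beats1 = Input(UInt(log2Ceil(maxBeats).W)) // beats-1 for the current message
    val fire   = Input(Bool())                     // channel handshake this cycle
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
    val count  = Output(UInt(log2Ceil(maxBeats).W))
  })
  val counter  = RegInit(0.U(log2Ceil(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U                      // nothing outstanding: the next fire starts a message
  io.last  := counter === 1.U || io.beats1 === 0.U // final beat (or a single-beat message)
  io.done  := io.last && io.fire
  io.count := io.beats1 & ~counter1                // beat index within the current message
  when (io.fire) { counter := Mux(io.first, io.beats1, counter1) }
}

Usage mirrors the edge helpers: first(x.a), for example, is just this tracker fed with numBeats1(x.a.bits) and x.a.fire.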
module TLMonitor_72( // @[Monitor.scala:36:7]
  input clock, // @[Monitor.scala:36:7]
  input reset, // @[Monitor.scala:36:7]
  input io_in_a_ready, // @[Monitor.scala:20:14]
  input io_in_a_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14]
  input [8:0] io_in_a_bits_address, // @[Monitor.scala:20:14]
  input io_in_d_ready, // @[Monitor.scala:20:14]
  input io_in_d_valid, // @[Monitor.scala:20:14]
  input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14]
  input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14]
  input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14]
  input io_in_d_bits_source, // @[Monitor.scala:20:14]
  input io_in_d_bits_sink, // @[Monitor.scala:20:14]
  input io_in_d_bits_denied, // @[Monitor.scala:20:14]
  input io_in_d_bits_corrupt // @[Monitor.scala:20:14]
);

  wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11]
  wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11]
  wire a_first_done = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35]
  reg a_first_counter; // @[Edges.scala:229:27]
  reg [2:0] opcode; // @[Monitor.scala:387:22]
  reg [8:0] address; // @[Monitor.scala:391:22]
  reg d_first_counter; // @[Edges.scala:229:27]
  reg [2:0] opcode_1; // @[Monitor.scala:538:22]
  reg [1:0] param_1; // @[Monitor.scala:539:22]
  reg [1:0] size_1; // @[Monitor.scala:540:22]
  reg source_1; // @[Monitor.scala:541:22]
  reg sink; // @[Monitor.scala:542:22]
  reg denied; // @[Monitor.scala:543:22]
  reg [1:0] inflight; // @[Monitor.scala:614:27]
  reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35]
  reg [3:0] inflight_sizes; // @[Monitor.scala:618:33]
  reg a_first_counter_1; // @[Edges.scala:229:27]
  reg d_first_counter_1; // @[Edges.scala:229:27]
  wire a_set = a_first_done & ~a_first_counter_1; // @[Decoupled.scala:51:35]
  wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46]
  wire _GEN = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74]
  reg [31:0] watchdog; // @[Monitor.scala:709:27]
  reg [1:0] inflight_1; // @[Monitor.scala:726:35]
  reg [3:0] inflight_sizes_1; // @[Monitor.scala:728:35]
  reg d_first_counter_2; // @[Edges.scala:229:27]
  reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
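The inflight and inflight_1 registers in the monitor above implement the same request/response bookkeeping as TLEdge.inFlight shown earlier. A reduced sketch of that accounting, tracking only the A and D channels (bce = false) and omitting the per-source opcode/size tracking; all names here are illustrative only:

// Reduced sketch of TLEdge.inFlight-style accounting for A/D only (illustrative).
import chisel3._
import chisel3.util._

class InflightSketch(endSourceId: Int) extends Module {
  val io = IO(new Bundle {
    val a_inc  = Input(Bool()) // a.fire && a_first && isRequest(a.bits)
    val d_inc  = Input(Bool()) // d.fire && d_first && isRequest(d.bits)
    val a_dec  = Input(Bool()) // a.fire && a_last  && isResponse(a.bits)
    val d_dec  = Input(Bool()) // d.fire && d_last  && isResponse(d.bits)
    val flight = Output(UInt(log2Ceil(3 * endSourceId + 1).W))
  })
  val flight = RegInit(0.U(log2Ceil(3 * endSourceId + 1).W))
  val inc = Cat(io.a_inc, io.d_inc) // one bit per channel that can open a transaction
  val dec = Cat(io.a_dec, io.d_dec) // one bit per channel that can close one
  flight := flight + PopCount(inc) - PopCount(dec)
  io.flight := flight
}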
Generate the Verilog code corresponding to the following Chisel files. File SourceB.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class SourceBRequest(params: InclusiveCacheParameters) extends InclusiveCacheBundle(params) { val param = UInt(3.W) val tag = UInt(params.tagBits.W) val set = UInt(params.setBits.W) val clients = UInt(params.clientBits.W) } class SourceB(params: InclusiveCacheParameters) extends Module { val io = IO(new Bundle { val req = Flipped(Decoupled(new SourceBRequest(params))) val b = Decoupled(new TLBundleB(params.inner.bundle)) }) if (params.firstLevel) { // Tie off unused ports io.req.ready := true.B io.b.valid := false.B io.b.bits := DontCare } else { val remain = RegInit(0.U(params.clientBits.W)) val remain_set = WireInit(init = 0.U(params.clientBits.W)) val remain_clr = WireInit(init = 0.U(params.clientBits.W)) remain := (remain | remain_set) & ~remain_clr val busy = remain.orR val todo = Mux(busy, remain, io.req.bits.clients) val next = ~(leftOR(todo) << 1) & todo if (params.clientBits > 1) { params.ccover(PopCount(remain) > 1.U, "SOURCEB_MULTI_PROBE", "Had to probe more than one client") } assert (!io.req.valid || io.req.bits.clients =/= 0.U) io.req.ready := !busy when (io.req.fire) { remain_set := io.req.bits.clients } // No restrictions on the type of buffer used here val b = Wire(chiselTypeOf(io.b)) io.b <> params.micro.innerBuf.b(b) b.valid := busy || io.req.valid when (b.fire) { remain_clr := next } params.ccover(b.valid && !b.ready, "SOURCEB_STALL", "Backpressured when issuing a probe") val tag = Mux(!busy, io.req.bits.tag, RegEnable(io.req.bits.tag, io.req.fire)) val set = Mux(!busy, io.req.bits.set, RegEnable(io.req.bits.set, io.req.fire)) val param = Mux(!busy, io.req.bits.param, RegEnable(io.req.bits.param, io.req.fire)) b.bits.opcode := TLMessages.Probe b.bits.param := param b.bits.size := params.offsetBits .U b.bits.source := params.clientSource(next) b.bits.address := params.expandAddress(tag, set, 0.U) b.bits.mask := ~0.U(params.inner.manager.beatBytes.W) b.bits.data := 0.U b.bits.corrupt := false.B } } File Parameters.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. 
*/ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property.cover import scala.math.{min,max} case class CacheParameters( level: Int, ways: Int, sets: Int, blockBytes: Int, beatBytes: Int, // inner hintsSkipProbe: Boolean) { require (ways > 0) require (sets > 0) require (blockBytes > 0 && isPow2(blockBytes)) require (beatBytes > 0 && isPow2(beatBytes)) require (blockBytes >= beatBytes) val blocks = ways * sets val sizeBytes = blocks * blockBytes val blockBeats = blockBytes/beatBytes } case class InclusiveCachePortParameters( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams) { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new TLBuffer(a, b, c, d, e)) } object InclusiveCachePortParameters { val none = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.none) val full = InclusiveCachePortParameters( a = BufferParams.default, b = BufferParams.default, c = BufferParams.default, d = BufferParams.default, e = BufferParams.default) // This removes feed-through paths from C=>A and A=>C val fullC = InclusiveCachePortParameters( a = BufferParams.none, b = BufferParams.none, c = BufferParams.default, d = BufferParams.none, e = BufferParams.none) val flowAD = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.flow, e = BufferParams.none) val flowAE = InclusiveCachePortParameters( a = BufferParams.flow, b = BufferParams.none, c = BufferParams.none, d = BufferParams.none, e = BufferParams.flow) // For innerBuf: // SinkA: no restrictions, flows into scheduler+putbuffer // SourceB: no restrictions, flows out of scheduler // sinkC: no restrictions, flows into scheduler+putbuffer & buffered to bankedStore // SourceD: no restrictions, flows out of bankedStore/regout // SinkE: no restrictions, flows into scheduler // // ... so while none is possible, you probably want at least flowAC to cut ready // from the scheduler delay and flowD to ease SourceD back-pressure // For outerBufer: // SourceA: must not be pipe, flows out of scheduler // SinkB: no restrictions, flows into scheduler // SourceC: pipe is useless, flows out of bankedStore/regout, parameter depth ignored // SinkD: no restrictions, flows into scheduler & bankedStore // SourceE: must not be pipe, flows out of scheduler // // ... 
AE take the channel ready into the scheduler, so you need at least flowAE } case class InclusiveCacheMicroParameters( writeBytes: Int, // backing store update granularity memCycles: Int = 40, // # of L2 clock cycles for a memory round-trip (50ns @ 800MHz) portFactor: Int = 4, // numSubBanks = (widest TL port * portFactor) / writeBytes dirReg: Boolean = false, innerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.fullC, // or none outerBuf: InclusiveCachePortParameters = InclusiveCachePortParameters.full) // or flowAE { require (writeBytes > 0 && isPow2(writeBytes)) require (memCycles > 0) require (portFactor >= 2) // for inner RMW and concurrent outer Relase + Grant } case class InclusiveCacheControlParameters( address: BigInt, beatBytes: Int, bankedControl: Boolean) case class InclusiveCacheParameters( cache: CacheParameters, micro: InclusiveCacheMicroParameters, control: Boolean, inner: TLEdgeIn, outer: TLEdgeOut)(implicit val p: Parameters) { require (cache.ways > 1) require (cache.sets > 1 && isPow2(cache.sets)) require (micro.writeBytes <= inner.manager.beatBytes) require (micro.writeBytes <= outer.manager.beatBytes) require (inner.manager.beatBytes <= cache.blockBytes) require (outer.manager.beatBytes <= cache.blockBytes) // Require that all cached address ranges have contiguous blocks outer.manager.managers.flatMap(_.address).foreach { a => require (a.alignment >= cache.blockBytes) } // If we are the first level cache, we do not need to support inner-BCE val firstLevel = !inner.client.clients.exists(_.supports.probe) // If we are the last level cache, we do not need to support outer-B val lastLevel = !outer.manager.managers.exists(_.regionType > RegionType.UNCACHED) require (lastLevel) // Provision enough resources to achieve full throughput with missing single-beat accesses val mshrs = InclusiveCacheParameters.all_mshrs(cache, micro) val secondary = max(mshrs, micro.memCycles - mshrs) val putLists = micro.memCycles // allow every request to be single beat val putBeats = max(2*cache.blockBeats, micro.memCycles) val relLists = 2 val relBeats = relLists*cache.blockBeats val flatAddresses = AddressSet.unify(outer.manager.managers.flatMap(_.address)) val pickMask = AddressDecoder(flatAddresses.map(Seq(_)), flatAddresses.map(_.mask).reduce(_|_)) def bitOffsets(x: BigInt, offset: Int = 0, tail: List[Int] = List.empty[Int]): List[Int] = if (x == 0) tail.reverse else bitOffsets(x >> 1, offset + 1, if ((x & 1) == 1) offset :: tail else tail) val addressMapping = bitOffsets(pickMask) val addressBits = addressMapping.size // println(s"addresses: ${flatAddresses} => ${pickMask} => ${addressBits}") val allClients = inner.client.clients.size val clientBitsRaw = inner.client.clients.filter(_.supports.probe).size val clientBits = max(1, clientBitsRaw) val stateBits = 2 val wayBits = log2Ceil(cache.ways) val setBits = log2Ceil(cache.sets) val offsetBits = log2Ceil(cache.blockBytes) val tagBits = addressBits - setBits - offsetBits val putBits = log2Ceil(max(putLists, relLists)) require (tagBits > 0) require (offsetBits > 0) val innerBeatBits = (offsetBits - log2Ceil(inner.manager.beatBytes)) max 1 val outerBeatBits = (offsetBits - log2Ceil(outer.manager.beatBytes)) max 1 val innerMaskBits = inner.manager.beatBytes / micro.writeBytes val outerMaskBits = outer.manager.beatBytes / micro.writeBytes def clientBit(source: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Cat(inner.client.clients.filter(_.supports.probe).map(_.sourceId.contains(source)).reverse) } } def 
clientSource(bit: UInt): UInt = { if (clientBitsRaw == 0) { 0.U } else { Mux1H(bit, inner.client.clients.filter(_.supports.probe).map(c => c.sourceId.start.U)) } } def parseAddress(x: UInt): (UInt, UInt, UInt) = { val offset = Cat(addressMapping.map(o => x(o,o)).reverse) val set = offset >> offsetBits val tag = set >> setBits (tag(tagBits-1, 0), set(setBits-1, 0), offset(offsetBits-1, 0)) } def widen(x: UInt, width: Int): UInt = { val y = x | 0.U(width.W) assert (y >> width === 0.U) y(width-1, 0) } def expandAddress(tag: UInt, set: UInt, offset: UInt): UInt = { val base = Cat(widen(tag, tagBits), widen(set, setBits), widen(offset, offsetBits)) val bits = Array.fill(outer.bundle.addressBits) { 0.U(1.W) } addressMapping.zipWithIndex.foreach { case (a, i) => bits(a) = base(i,i) } Cat(bits.reverse) } def restoreAddress(expanded: UInt): UInt = { val missingBits = flatAddresses .map { a => (a.widen(pickMask).base, a.widen(~pickMask)) } // key is the bits to restore on match .groupBy(_._1) .view .mapValues(_.map(_._2)) val muxMask = AddressDecoder(missingBits.values.toList) val mux = missingBits.toList.map { case (bits, addrs) => val widen = addrs.map(_.widen(~muxMask)) val matches = AddressSet .unify(widen.distinct) .map(_.contains(expanded)) .reduce(_ || _) (matches, bits.U) } expanded | Mux1H(mux) } def dirReg[T <: Data](x: T, en: Bool = true.B): T = { if (micro.dirReg) RegEnable(x, en) else x } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = cover(cond, "CCACHE_L" + cache.level + "_" + label, "MemorySystem;;" + desc) } object MetaData { val stateBits = 2 def INVALID: UInt = 0.U(stateBits.W) // way is empty def BRANCH: UInt = 1.U(stateBits.W) // outer slave cache is trunk def TRUNK: UInt = 2.U(stateBits.W) // unique inner master cache is trunk def TIP: UInt = 3.U(stateBits.W) // we are trunk, inner masters are branch // Does a request need trunk? def needT(opcode: UInt, param: UInt): Bool = { !opcode(2) || (opcode === TLMessages.Hint && param === TLHints.PREFETCH_WRITE) || ((opcode === TLMessages.AcquireBlock || opcode === TLMessages.AcquirePerm) && param =/= TLPermissions.NtoB) } // Does a request prove the client need not be probed? 
  def skipProbeN(opcode: UInt, hintsSkipProbe: Boolean): Bool = {
    // Acquire(toB) and Get => is N, so no probe
    // Acquire(*toT) => is N or B, but need T, so no probe
    // Hint => could be anything, so probe IS needed, if hintsSkipProbe is enabled, skip probe the same client
    // Put* => is N or B, so probe IS needed
    opcode === TLMessages.AcquireBlock ||
    opcode === TLMessages.AcquirePerm ||
    opcode === TLMessages.Get ||
    (opcode === TLMessages.Hint && hintsSkipProbe.B)
  }

  def isToN(param: UInt): Bool = {
    param === TLPermissions.TtoN || param === TLPermissions.BtoN || param === TLPermissions.NtoN
  }

  def isToB(param: UInt): Bool = {
    param === TLPermissions.TtoB || param === TLPermissions.BtoB
  }
}

object InclusiveCacheParameters {
  val lfsrBits = 10
  val L2ControlAddress = 0x2010000
  val L2ControlSize = 0x1000

  def out_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int = {
    // We need 2-3 normal MSHRs to cover the Directory latency
    // To fully exploit memory bandwidth-delay-product, we need memCycles/blockBeats MSHRs
    max(if (micro.dirReg) 3 else 2, (micro.memCycles + cache.blockBeats - 1) / cache.blockBeats)
  }

  def all_mshrs(cache: CacheParameters, micro: InclusiveCacheMicroParameters): Int =
    // We need a dedicated MSHR for B+C each
    2 + out_mshrs(cache, micro)
}

class InclusiveCacheBundle(params: InclusiveCacheParameters) extends Bundle
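As a worked instance of the MSHR sizing formulas above, with parameter values chosen purely for illustration (they are not taken from this document's configuration):

// Illustrative sizing only:
// cache: blockBytes = 64, beatBytes = 8   => blockBeats = 64 / 8 = 8
// micro: memCycles = 40, dirReg = false
// out_mshrs = max(2, (40 + 8 - 1) / 8) = max(2, 5) = 5
// all_mshrs = 2 + out_mshrs = 7   // the extra two are the dedicated B and C MSHRs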
module SourceB( // @[SourceB.scala:33:7] input clock, // @[SourceB.scala:33:7] input reset, // @[SourceB.scala:33:7] output io_req_ready, // @[SourceB.scala:35:14] input io_req_valid, // @[SourceB.scala:35:14] input [2:0] io_req_bits_param, // @[SourceB.scala:35:14] input [12:0] io_req_bits_tag, // @[SourceB.scala:35:14] input [9:0] io_req_bits_set, // @[SourceB.scala:35:14] input [1:0] io_req_bits_clients, // @[SourceB.scala:35:14] input io_b_ready, // @[SourceB.scala:35:14] output io_b_valid, // @[SourceB.scala:35:14] output [1:0] io_b_bits_param, // @[SourceB.scala:35:14] output [5:0] io_b_bits_source, // @[SourceB.scala:35:14] output [31:0] io_b_bits_address // @[SourceB.scala:35:14] ); wire io_req_valid_0 = io_req_valid; // @[SourceB.scala:33:7] wire [2:0] io_req_bits_param_0 = io_req_bits_param; // @[SourceB.scala:33:7] wire [12:0] io_req_bits_tag_0 = io_req_bits_tag; // @[SourceB.scala:33:7] wire [9:0] io_req_bits_set_0 = io_req_bits_set; // @[SourceB.scala:33:7] wire [1:0] io_req_bits_clients_0 = io_req_bits_clients; // @[SourceB.scala:33:7] wire io_b_ready_0 = io_b_ready; // @[SourceB.scala:33:7] wire _b_bits_address_base_T_2 = reset; // @[Parameters.scala:222:12] wire _b_bits_address_base_T_8 = reset; // @[Parameters.scala:222:12] wire _b_bits_address_base_T_14 = reset; // @[Parameters.scala:222:12] wire [2:0] io_b_bits_opcode = 3'h6; // @[SourceB.scala:33:7] wire [2:0] io_b_bits_size = 3'h6; // @[SourceB.scala:33:7] wire [2:0] b_bits_opcode = 3'h6; // @[SourceB.scala:65:17] wire [2:0] b_bits_size = 3'h6; // @[SourceB.scala:65:17] wire [7:0] io_b_bits_mask = 8'hFF; // @[SourceB.scala:33:7] wire [7:0] b_bits_mask = 8'hFF; // @[SourceB.scala:65:17] wire [7:0] _b_bits_mask_T = 8'hFF; // @[SourceB.scala:81:23] wire [63:0] io_b_bits_data = 64'h0; // @[SourceB.scala:33:7] wire [63:0] b_bits_data = 64'h0; // @[SourceB.scala:65:17] wire io_b_bits_corrupt = 1'h0; // @[SourceB.scala:33:7] wire b_bits_corrupt = 1'h0; // @[SourceB.scala:65:17] wire _b_bits_address_base_T = 1'h0; // @[Parameters.scala:222:15] wire _b_bits_address_base_T_4 = 1'h0; // @[Parameters.scala:222:12] wire _b_bits_address_base_T_6 = 1'h0; // @[Parameters.scala:222:15] wire _b_bits_address_base_T_10 = 1'h0; // @[Parameters.scala:222:12] wire _b_bits_address_base_T_12 = 1'h0; // @[Parameters.scala:222:15] wire _b_bits_address_base_T_16 = 1'h0; // @[Parameters.scala:222:12] wire [1:0] b_bits_address_hi_hi_hi_lo = 2'h0; // @[Parameters.scala:230:8] wire [5:0] b_bits_address_base_y_2 = 6'h0; // @[Parameters.scala:221:15] wire [5:0] _b_bits_address_base_T_17 = 6'h0; // @[Parameters.scala:223:6] wire _b_bits_address_base_T_1 = 1'h1; // @[Parameters.scala:222:24] wire _b_bits_address_base_T_7 = 1'h1; // @[Parameters.scala:222:24] wire _b_bits_address_base_T_13 = 1'h1; // @[Parameters.scala:222:24] wire _io_req_ready_T; // @[SourceB.scala:61:21] wire b_ready = io_b_ready_0; // @[SourceB.scala:33:7, :65:17] wire b_valid; // @[SourceB.scala:65:17] wire [1:0] b_bits_param; // @[SourceB.scala:65:17] wire [5:0] b_bits_source; // @[SourceB.scala:65:17] wire [31:0] b_bits_address; // @[SourceB.scala:65:17] wire io_req_ready_0; // @[SourceB.scala:33:7] wire [1:0] io_b_bits_param_0; // @[SourceB.scala:33:7] wire [5:0] io_b_bits_source_0; // @[SourceB.scala:33:7] wire [31:0] io_b_bits_address_0; // @[SourceB.scala:33:7] wire io_b_valid_0; // @[SourceB.scala:33:7] reg [1:0] remain; // @[SourceB.scala:46:25] wire [1:0] remain_set; // @[SourceB.scala:47:30] wire [1:0] remain_clr; // @[SourceB.scala:48:30] wire [1:0] _remain_T = remain | 
remain_set; // @[SourceB.scala:46:25, :47:30, :49:23]
  wire [1:0] _remain_T_1 = ~remain_clr; // @[SourceB.scala:48:30, :49:39]
  wire [1:0] _remain_T_2 = _remain_T & _remain_T_1; // @[SourceB.scala:49:{23,37,39}]
  wire busy = |remain; // @[SourceB.scala:46:25, :51:23]
  wire [1:0] todo = busy ? remain : io_req_bits_clients_0; // @[SourceB.scala:33:7, :46:25, :51:23, :52:19]
  wire [2:0] _next_T = {todo, 1'h0}; // @[package.scala:253:48]
  wire [1:0] _next_T_1 = _next_T[1:0]; // @[package.scala:253:{48,53}]
  wire [1:0] _next_T_2 = todo | _next_T_1; // @[package.scala:253:{43,53}]
  wire [1:0] _next_T_3 = _next_T_2; // @[package.scala:253:43, :254:17]
  wire [2:0] _next_T_4 = {_next_T_3, 1'h0}; // @[package.scala:254:17]
  wire [2:0] _next_T_5 = ~_next_T_4; // @[SourceB.scala:53:{16,31}]
  wire [2:0] next = {1'h0, _next_T_5[1:0] & todo}; // @[SourceB.scala:52:19, :53:{16,37}]
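The _next_T chain above is the generated form of SourceB's next = ~(leftOR(todo) << 1) & todo, which isolates the lowest-numbered client still left to probe as a one-hot value. A small sketch of the same trick on a generic UInt; leftOR comes from freechips.rocketchip.util, and the helper name below is made up for illustration:

import chisel3._
import freechips.rocketchip.util._

// Picks the least-significant set bit of `todo` as a one-hot value.
def lowestSetBit(todo: UInt): UInt = ~(leftOR(todo) << 1) & todo

// Example: todo = b0110 (clients 1 and 2 pending)
//   leftOR(todo)         = b1110
//   ~(leftOR(todo) << 1) = ...00011
//   & todo               = b0010   -> client 1 is probed first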
Generate the Verilog code corresponding to the following Chisel files. File Nodes.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends 
FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): 
TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( 
dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File BootROM.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.devices.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.bundlebridge._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet, RegionType, TransferSizes} import freechips.rocketchip.resources.{Resource, SimpleDevice} import freechips.rocketchip.subsystem._ import freechips.rocketchip.tilelink.{TLFragmenter, TLManagerNode, TLSlaveParameters, TLSlavePortParameters} import java.nio.ByteBuffer import java.nio.file.{Files, Paths} /** Size, location and contents of the boot rom. 
*/ case class BootROMParams( address: BigInt = 0x10000, size: Int = 0x10000, hang: BigInt = 0x10040, // The hang parameter is used as the power-on reset vector contentFileName: String) class TLROM(val base: BigInt, val size: Int, contentsDelayed: => Seq[Byte], executable: Boolean = true, beatBytes: Int = 4, resources: Seq[Resource] = new SimpleDevice("rom", Seq("sifive,rom0")).reg("mem"))(implicit p: Parameters) extends LazyModule { val node = TLManagerNode(Seq(TLSlavePortParameters.v1( Seq(TLSlaveParameters.v1( address = List(AddressSet(base, size-1)), resources = resources, regionType = RegionType.UNCACHED, executable = executable, supportsGet = TransferSizes(1, beatBytes), fifoId = Some(0))), beatBytes = beatBytes))) lazy val module = new Impl class Impl extends LazyModuleImp(this) { val contents = contentsDelayed val wrapSize = 1 << log2Ceil(contents.size) require (wrapSize <= size) val (in, edge) = node.in(0) val words = (contents ++ Seq.fill(wrapSize-contents.size)(0.toByte)).grouped(beatBytes).toSeq val bigs = words.map(_.foldRight(BigInt(0)){ case (x,y) => (x.toInt & 0xff) | y << 8}) val rom = VecInit(bigs.map(_.U((8*beatBytes).W))) in.d.valid := in.a.valid in.a.ready := in.d.ready val index = in.a.bits.address(log2Ceil(wrapSize)-1,log2Ceil(beatBytes)) val high = if (wrapSize == size) 0.U else in.a.bits.address(log2Ceil(size)-1, log2Ceil(wrapSize)) in.d.bits := edge.AccessAck(in.a.bits, Mux(high.orR, 0.U, rom(index))) // Tie off unused channels in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B } } case class BootROMLocated(loc: HierarchicalLocation) extends Field[Option[BootROMParams]](None) object BootROM { /** BootROM.attach not only instantiates a TLROM and attaches it to the tilelink interconnect * at a configurable location, but also drives the tiles' reset vectors to point * at its 'hang' address parameter value. */ def attach(params: BootROMParams, subsystem: BaseSubsystem with HasHierarchicalElements with HasTileInputConstants, where: TLBusWrapperLocation) (implicit p: Parameters): TLROM = { val tlbus = subsystem.locateTLBusWrapper(where) val bootROMDomainWrapper = tlbus.generateSynchronousDomain("BootROM").suggestName("bootrom_domain") val bootROMResetVectorSourceNode = BundleBridgeSource[UInt]() lazy val contents = { val romdata = Files.readAllBytes(Paths.get(params.contentFileName)) val rom = ByteBuffer.wrap(romdata) rom.array() ++ subsystem.dtb.contents } val bootrom = bootROMDomainWrapper { LazyModule(new TLROM(params.address, params.size, contents, true, tlbus.beatBytes)) } bootrom.node := tlbus.coupleTo("bootrom"){ TLFragmenter(tlbus, Some("BootROM")) := _ } // Drive the `subsystem` reset vector to the `hang` address of this Boot ROM. subsystem.tileResetVectorNexusNode := bootROMResetVectorSourceNode InModuleBody { val reset_vector_source = bootROMResetVectorSourceNode.bundle require(reset_vector_source.getWidth >= params.hang.bitLength, s"BootROM defined with a reset vector (${params.hang})too large for physical address space (${reset_vector_source.getWidth})") bootROMResetVectorSourceNode.bundle := params.hang.U } bootrom } }
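The bigs computation in TLROM above packs each beatBytes-sized group of ROM bytes into one little-endian beat via foldRight. A small worked example of that fold; the byte values are made up:

// Worked example of TLROM's byte-to-beat packing (illustrative values).
val group: Seq[Byte] = Seq(0x67, 0x45, 0x23, 0x01).map(_.toByte)
val packed = group.foldRight(BigInt(0)) { case (x, y) => (x.toInt & 0xff) | y << 8 }
// packed == BigInt(0x01234567): the first byte of the group (0x67) lands in bits [7:0],
// so byte order within a beat follows increasing bit position (little-endian).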
module TLROM( // @[BootROM.scala:41:9] input clock, // @[BootROM.scala:41:9] input reset, // @[BootROM.scala:41:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [11:0] auto_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [16:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input auto_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [11:0] auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_d_bits_data // @[LazyModuleImp.scala:107:25] ); wire [511:0][63:0] _GEN = '{64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h0, 64'h737470, 64'h75727265746E6900, 64'h746E657261702D74, 64'h7075727265746E69, 64'h736B636F6C6300, 64'h7665646E2C766373, 64'h697200797469726F, 64'h6972702D78616D2C, 64'h7663736972006863, 64'h617474612D677562, 64'h6564006465646E65, 64'h7478652D73747075, 64'h727265746E690073, 64'h656D616E2D747570, 64'h74756F2D6B636F6C, 64'h6300736C6C65632D, 64'h6B636F6C63230074, 64'h6E756F632D726873, 64'h6D2C657669666973, 64'h64656966696E75, 64'h2D6568636163006C, 64'h6576656C2D656863, 64'h61630073656D616E, 64'h2D67657200736567, 64'h6E617200656C646E, 64'h6168700072656C6C, 64'h6F72746E6F632D74, 64'h7075727265746E69, 64'h736C6C65632D74, 64'h7075727265746E69, 64'h230074696C70732D, 64'h626C740073757461, 64'h747300736E6F6967, 64'h6572706D702C7663, 64'h7369720079746972, 64'h616C756E61726770, 64'h6D702C7663736972, 64'h6173692C766373, 64'h6972006765720065, 64'h686361632D6C6576, 64'h656C2D7478656E00, 64'h657079742D756D6D, 64'h657A69732D626C, 64'h742D690073746573, 64'h2D626C742D690065, 64'h7A69732D65686361, 64'h632D690073746573, 64'h2D65686361632D69, 64'h657A69732D6B63, 64'h6F6C622D65686361, 64'h632D6900746E756F, 64'h632D746E696F706B, 64'h616572622D636578, 64'h652D657261776472, 64'h616800657079745F, 64'h6563697665640065, 64'h7A69732D626C742D, 64'h6400737465732D62, 64'h6C742D6400657A69, 64'h732D65686361632D, 64'h6400737465732D65, 64'h686361632D640065, 64'h7A69732D6B636F6C, 64'h622D65686361632D, 64'h640079636E657571, 64'h6572662D6B636F6C, 64'h630079636E657571, 64'h6572662D65736162, 64'h656D697400687461, 64'h702D74756F647473, 64'h306C6169726573, 64'h6C65646F6D0065, 64'h6C62697461706D6F, 64'h6300736C6C65632D, 64'h657A69732300736C, 64'h6C65632D73736572, 64'h6464612309000000, 64'h200000002000000, 64'h2000000006C6F72, 64'h746E6F63A8010000, 64'h800000003000000, 64'h10000000001100, 64'h2E01000008000000, 64'h300000000000000, 64'h3030303031314072, 64'h65747465732D7465, 64'h7365722D656C6974, 64'h100000002000000, 64'h6C6F72746E6F63, 64'hA801000008000000, 64'h300000000100000, 64'h2102E010000, 64'h800000003000000, 64'h100000055020000, 64'h400000003000000, 64'h600000044020000, 64'h400000003000000, 64'h30747261, 64'h752C657669666973, 64'h1B0000000D000000, 64'h300000005000000, 64'h3D02000004000000, 64'h300000000303030, 64'h3032303031406C61, 64'h6972657301000000, 
64'h2000000006B636F, 64'h6C632D6465786966, 64'h1B0000000C000000, 64'h300000000006B63, 64'h6F6C635F73756273, 64'hEB0100000B000000, 64'h30000000065CD1D, 64'h5300000004000000, 64'h300000000000000, 64'hDE01000004000000, 64'h300000000006B63, 64'h6F6C635F73756273, 64'h100000002000000, 64'h6D656DA8010000, 64'h400000003000000, 64'h10000000100, 64'h2E01000008000000, 64'h300000000306D6F, 64'h722C657669666973, 64'h1B0000000C000000, 64'h300000000000030, 64'h30303031406D6F72, 64'h100000002000000, 64'h500000099010000, 64'h400000003000000, 64'h6B636F6C632D64, 64'h657869661B000000, 64'hC00000003000000, 64'h6B636F6C635F, 64'h73756270EB010000, 64'hB00000003000000, 64'h65CD1D53000000, 64'h400000003000000, 64'hDE010000, 64'h400000003000000, 64'h6B636F6C635F, 64'h7375627001000000, 64'h2000000006B636F, 64'h6C632D6465786966, 64'h1B0000000C000000, 64'h300000000006B63, 64'h6F6C635F7375626D, 64'hEB0100000B000000, 64'h30000000065CD1D, 64'h5300000004000000, 64'h300000000000000, 64'hDE01000004000000, 64'h300000000006B63, 64'h6F6C635F7375626D, 64'h100000002000000, 64'h600000099010000, 64'h400000003000000, 64'h100000032020000, 64'h400000003000000, 64'h10000001F020000, 64'h400000003000000, 64'h6C6F72746E6F63, 64'hA801000008000000, 64'h300000000000004, 64'hC2E010000, 64'h800000003000000, 64'h900000004000000, 64'hB00000004000000, 64'hFE01000010000000, 64'h300000084010000, 64'h3000000, 64'h3063696C702C76, 64'h637369721B000000, 64'hC00000003000000, 64'h100000073010000, 64'h400000003000000, 64'h30303030, 64'h3030634072656C6C, 64'h6F72746E6F632D74, 64'h7075727265746E69, 64'h100000002000000, 64'h6B636F6C632D64, 64'h657869661B000000, 64'hC00000003000000, 64'h6B636F6C635F, 64'h73756266EB010000, 64'hB00000003000000, 64'h65CD1D53000000, 64'h400000003000000, 64'hDE010000, 64'h400000003000000, 64'h6B636F6C635F, 64'h7375626601000000, 64'h200000000100000, 64'h3000002E010000, 64'h800000003000000, 64'h30726F7272, 64'h652C657669666973, 64'h1B0000000E000000, 64'h300000000000030, 64'h3030334065636976, 64'h65642D726F727265, 64'h100000002000000, 64'h6C6F72746E6F63, 64'hA801000008000000, 64'h300000000100000, 64'h2E010000, 64'h800000003000000, 64'hFFFF000004000000, 64'hFE01000008000000, 64'h300000000000000, 64'h6761746A12020000, 64'h500000003000000, 64'h3331302D, 64'h67756265642C7663, 64'h736972003331302D, 64'h67756265642C6576, 64'h696669731B000000, 64'h2100000003000000, 64'h304072656C6C, 64'h6F72746E6F632D67, 64'h7562656401000000, 64'h2000000006C6F72, 64'h746E6F63A8010000, 64'h800000003000000, 64'h10000000001000, 64'h2E01000008000000, 64'h300000000003030, 64'h3030303140726574, 64'h61672D6B636F6C63, 64'h100000002000000, 64'h6C6F72746E6F63, 64'hA801000008000000, 64'h300000000000100, 64'h22E010000, 64'h800000003000000, 64'h700000004000000, 64'h300000004000000, 64'hFE01000010000000, 64'h300000000000000, 64'h30746E696C632C76, 64'h637369721B000000, 64'hD00000003000000, 64'h3030303030, 64'h303240746E696C63, 64'h100000002000000, 64'h6B636F6C632D64, 64'h657869661B000000, 64'hC00000003000000, 64'h6B636F6C635F, 64'h73756263EB010000, 64'hB00000003000000, 64'h65CD1D53000000, 64'h400000003000000, 64'hDE010000, 64'h400000003000000, 64'h6B636F6C635F, 64'h7375626301000000, 64'h200000001000000, 64'h9901000004000000, 64'h300000007000000, 64'hCC01000004000000, 64'h3000000006C6F72, 64'h746E6F63A8010000, 64'h800000003000000, 64'h10000000000102, 64'h2E01000008000000, 64'h300000003000000, 64'h20000001D010000, 64'h800000003000000, 64'h65686361, 64'h6300306568636163, 64'h65766973756C636E, 64'h692C657669666973, 64'h1B0000001D000000, 64'h3000000BE010000, 64'h3000000, 
64'h80085000000, 64'h400000003000000, 64'h4000078000000, 64'h400000003000000, 64'h2000000B2010000, 64'h400000003000000, 64'h4000000065000000, 64'h400000003000000, 64'h30303030, 64'h3130324072656C6C, 64'h6F72746E6F632D65, 64'h6863616301000000, 64'h2000000006C6F72, 64'h746E6F63A8010000, 64'h800000003000000, 64'h10000000100000, 64'h2E01000008000000, 64'h300000000000030, 64'h303031406765722D, 64'h737365726464612D, 64'h746F6F6201000000, 64'hA101000000000000, 64'h300000000737562, 64'h2D656C706D697300, 64'h636F732D64726179, 64'h706968632C726162, 64'h2D6263751B000000, 64'h2000000003000000, 64'h10000000F000000, 64'h400000003000000, 64'h100000000000000, 64'h400000003000000, 64'h636F7301000000, 64'h200000002000000, 64'h9901000004000000, 64'h300000000000010, 64'h802E010000, 64'h800000003000000, 64'h79726F6D656D, 64'hA600000007000000, 64'h300000000303030, 64'h3030303038407972, 64'h6F6D656D01000000, 64'h200000003000000, 64'h9901000004000000, 64'h300000000000000, 64'h64656C6261736964, 64'h6201000009000000, 64'h300000000000100, 64'h82E010000, 64'h800000003000000, 64'h79726F6D656D, 64'hA600000007000000, 64'h300000000003030, 64'h3030303038407972, 64'h6F6D656D01000000, 64'h200000000000030, 64'h666974682C626375, 64'h1B0000000A000000, 64'h300000000000000, 64'h6669746801000000, 64'h200000002000000, 64'h200000004000000, 64'h9901000004000000, 64'h300000084010000, 64'h3000000, 64'h63746E692D75, 64'h70632C7663736972, 64'h1B0000000F000000, 64'h300000001000000, 64'h7301000004000000, 64'h300000000000000, 64'h72656C6C6F72746E, 64'h6F632D7470757272, 64'h65746E6901000000, 64'h6901000000000000, 64'h300000020A10700, 64'h4000000004000000, 64'h300000000000000, 64'h79616B6F62010000, 64'h500000003000000, 64'h800000051010000, 64'h400000003000000, 64'h40000003C010000, 64'h400000003000000, 64'h74656B636F, 64'h72785F6262767A5F, 64'h73627A5F62627A5F, 64'h61627A5F68667A5F, 64'h6866767A5F643436, 64'h65767A5F62363532, 64'h6C767A5F6D706869, 64'h7A5F6965636E6566, 64'h697A5F727363697A, 64'h7662636466616D69, 64'h3436767232010000, 64'h5200000003000000, 64'h2E010000, 64'h400000003000000, 64'h10000001D010000, 64'h400000003000000, 64'h393376732C76, 64'h6373697214010000, 64'hB00000003000000, 64'h2000000009010000, 64'h400000003000000, 64'h1000000FE000000, 64'h400000003000000, 64'h800000F1000000, 64'h400000003000000, 64'h40000000E4000000, 64'h400000003000000, 64'h40000000D1000000, 64'h400000003000000, 64'h1000000B2000000, 64'h400000003000000, 64'h757063A6000000, 64'h400000003000000, 64'h200000009B000000, 64'h400000003000000, 64'h100000090000000, 64'h400000003000000, 64'h80000083000000, 64'h400000003000000, 64'h4000000076000000, 64'h400000003000000, 64'h4000000063000000, 64'h400000003000000, 64'h76637369, 64'h72003074656B636F, 64'h722C657669666973, 64'h1B00000015000000, 64'h300000000000000, 64'h5300000004000000, 64'h300000000000030, 64'h4075706301000000, 64'h20A1070040000000, 64'h400000003000000, 64'hF000000, 64'h400000003000000, 64'h100000000000000, 64'h400000003000000, 64'h73757063, 64'h100000002000000, 64'h30303030, 64'h32303031406C6169, 64'h7265732F636F732F, 64'h3400000015000000, 64'h300000000006E65, 64'h736F686301000000, 64'h200000000000000, 64'h3030303032303031, 64'h406C61697265732F, 64'h636F732F2C000000, 64'h1500000003000000, 64'h73657361696C61, 64'h100000000000000, 64'h6472617970696863, 64'h2C7261622D626375, 64'h2600000011000000, 64'h300000000000000, 64'h7665642D64726179, 64'h706968632C726162, 64'h2D6263751B000000, 64'h1500000003000000, 64'h10000000F000000, 64'h400000003000000, 64'h100000000000000, 64'h400000003000000, 64'h1000000, 64'h0, 
64'h0, 64'h940B000060020000, 64'h10000000, 64'h1100000028000000, 64'hCC0B000038000000, 64'h2C0E0000EDFE0DD0, 64'h1330200073, 64'h3006307308000613, 64'h185859300000597, 64'hF140257334151073, 64'h5350300001537, 64'h5A02300B505B3, 64'h251513FE029EE3, 64'h5A283F81FF06F, 64'h0, 64'h0, 64'h2C0006F, 64'hFE069AE3FFC62683, 64'h46061300D62023, 64'h10069300458613, 64'h380006F00050463, 64'hF1402573020005B7, 64'hFFDFF06F, 64'h1050007330052073, 64'h3045107300800513, 64'h3445307322200513, 64'h3030107300028863, 64'h12F2934122D293, 64'h301022F330551073, 64'h405051300000517}; TLMonitor_54 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (auto_in_d_ready), .io_in_a_valid (auto_in_a_valid), .io_in_a_bits_opcode (auto_in_a_bits_opcode), .io_in_a_bits_param (auto_in_a_bits_param), .io_in_a_bits_size (auto_in_a_bits_size), .io_in_a_bits_source (auto_in_a_bits_source), .io_in_a_bits_address (auto_in_a_bits_address), .io_in_a_bits_mask (auto_in_a_bits_mask), .io_in_a_bits_corrupt (auto_in_a_bits_corrupt), .io_in_d_ready (auto_in_d_ready), .io_in_d_valid (auto_in_a_valid), .io_in_d_bits_size (auto_in_a_bits_size), .io_in_d_bits_source (auto_in_a_bits_source) ); // @[Nodes.scala:27:25] assign auto_in_a_ready = auto_in_d_ready; // @[BootROM.scala:41:9] assign auto_in_d_valid = auto_in_a_valid; // @[BootROM.scala:41:9] assign auto_in_d_bits_size = auto_in_a_bits_size; // @[BootROM.scala:41:9] assign auto_in_d_bits_source = auto_in_a_bits_source; // @[BootROM.scala:41:9] assign auto_in_d_bits_data = (|(auto_in_a_bits_address[15:12])) ? 64'h0 : _GEN[auto_in_a_bits_address[11:3]]; // @[BootROM.scala:41:9, :55:34, :56:64, :57:{47,53}] endmodule
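The final `assign` in the module above is the flattened form of TLROM's `high`/`index` split for this instance: the 512-entry `_GEN` table is the 4 KiB wrapped image (512 x 8 bytes), address bits [11:3] pick the word, and any address whose bits [15:12] are non-zero falls outside the wrapped contents and reads back zero. A small plain-Scala sketch of that decode, using the sizes visible in this instance (the helper name `romWord` is made up):

// Decode used by this TLROM instance: size = 0x10000, wrapSize = 0x1000, beatBytes = 8.
// `romWord` stands in for the _GEN lookup table above.
def readData(address: Int, romWord: Int => BigInt): BigInt = {
  val high  = (address >> 12) & 0xf   // address(15,12): beyond the wrapped image?
  val index = (address >> 3) & 0x1ff  // address(11,3): 8-byte word index
  if (high != 0) BigInt(0) else romWord(index)
}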
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
   *
   * @param strs list of strings
   * @return String combining the list with the prefix per line
   */
  def apply(strs: String*)(implicit p: Parameters) = {
    val prefix = "[C" + s"${p(TileKey).tileId}" + "] "
    strs.map(str => prefix + str + "\n").mkString("")
  }
}
File rename-freelist.scala:
//******************************************************************************
// Copyright (c) 2015 - 2019, The Regents of the University of California (Regents).
// All Rights Reserved. See LICENSE and LICENSE.SiFive for license details.
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------
// Rename FreeList
//------------------------------------------------------------------------------
//------------------------------------------------------------------------------

package boom.v3.exu

import chisel3._
import chisel3.util._

import boom.v3.common._
import boom.v3.util._
import org.chipsalliance.cde.config.Parameters

class RenameFreeList(
  val plWidth: Int,
  val numPregs: Int,
  val numLregs: Int)
  (implicit p: Parameters) extends BoomModule
{
  private val pregSz = log2Ceil(numPregs)
  private val n = numPregs

  val io = IO(new BoomBundle()(p) {
    // Physical register requests.
    val reqs = Input(Vec(plWidth, Bool()))
    val alloc_pregs = Output(Vec(plWidth, Valid(UInt(pregSz.W))))

    // Pregs returned by the ROB.
    val dealloc_pregs = Input(Vec(plWidth, Valid(UInt(pregSz.W))))

    // Branch info for starting new allocation lists.
    val ren_br_tags = Input(Vec(plWidth, Valid(UInt(brTagSz.W))))

    // Mispredict info for recovering speculatively allocated registers.
    val brupdate = Input(new BrUpdateInfo)

    val debug = new Bundle {
      val pipeline_empty = Input(Bool())
      val freelist = Output(Bits(numPregs.W))
      val isprlist = Output(Bits(numPregs.W))
    }
  })

  // The free list register array and its branch allocation lists.
  val free_list = RegInit(UInt(numPregs.W), ~(1.U(numPregs.W)))
  val br_alloc_lists = Reg(Vec(maxBrCount, UInt(numPregs.W)))

  // Select pregs from the free list.
  val sels = SelectFirstN(free_list, plWidth)
  val sel_fire = Wire(Vec(plWidth, Bool()))

  // Allocations seen by branches in each pipeline slot.
  val allocs = io.alloc_pregs map (a => UIntToOH(a.bits))
  val alloc_masks = (allocs zip io.reqs).scanRight(0.U(n.W)) { case ((a,r),m) => m | a & Fill(n,r) }

  // Masks that modify the freelist array.
  val sel_mask = (sels zip sel_fire) map { case (s,f) => s & Fill(n,f) } reduce(_|_)
  val br_deallocs = br_alloc_lists(io.brupdate.b2.uop.br_tag) & Fill(n, io.brupdate.b2.mispredict)
  val dealloc_mask = io.dealloc_pregs.map(d => UIntToOH(d.bits)(numPregs-1,0) & Fill(n,d.valid)).reduce(_|_) | br_deallocs

  val br_slots = VecInit(io.ren_br_tags.map(tag => tag.valid)).asUInt
  // Create branch allocation lists.
  for (i <- 0 until maxBrCount) {
    val list_req = VecInit(io.ren_br_tags.map(tag => UIntToOH(tag.bits)(i))).asUInt & br_slots
    val new_list = list_req.orR
    br_alloc_lists(i) := Mux(new_list,
      Mux1H(list_req, alloc_masks.slice(1, plWidth+1)),
      br_alloc_lists(i) & ~br_deallocs | alloc_masks(0))
  }

  // Update the free list.
  free_list := (free_list & ~sel_mask | dealloc_mask) & ~(1.U(numPregs.W))

  // Pipeline logic | hookup outputs.
  for (w <- 0 until plWidth) {
    val can_sel = sels(w).orR
    val r_valid = RegInit(false.B)
    val r_sel   = RegEnable(OHToUInt(sels(w)), sel_fire(w))

    r_valid := r_valid && !io.reqs(w) || can_sel
    sel_fire(w) := (!r_valid || io.reqs(w)) && can_sel

    io.alloc_pregs(w).bits  := r_sel
    io.alloc_pregs(w).valid := r_valid
  }

  io.debug.freelist := free_list | io.alloc_pregs.map(p => UIntToOH(p.bits) & Fill(n,p.valid)).reduce(_|_)
  io.debug.isprlist := 0.U  // TODO track commit free list.

  assert (!(io.debug.freelist & dealloc_mask).orR, "[freelist] Returning a free physical register.")
  assert (!io.debug.pipeline_empty || PopCount(io.debug.freelist) >= (numPregs - numLregs - 1).U,
    "[freelist] Leaking physical registers.")
}
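For intuition on the selection and update logic in RenameFreeList: SelectFirstN (from util.scala above) peels off the lowest set bits of `free_list` one at a time as one-hot masks, fired requests clear those bits, deallocations from the ROB (and branch-recovery masks) set them again, and physical register 0 is always forced busy. The plain-Scala sketch below walks one cycle of that update on a made-up 8-entry free list with plWidth = 2; the names and values are illustrative only, not from an actual BOOM configuration.

// One cycle of the free-list update, modeled with plain BigInts.
// Assumptions: 8 physical registers, plWidth = 2; values are illustrative only.
object FreeListSketch extends App {
  // Software analogue of util.scala's SelectFirstN: pick the k lowest set bits as one-hots.
  def selectFirstN(mask: BigInt, k: Int): Seq[BigInt] = {
    var m = mask
    Seq.fill(k) {
      val sel = if (m == 0) BigInt(0) else BigInt(1) << m.lowestSetBit
      m &= ~sel
      sel
    }
  }

  var freeList = BigInt("11110000", 2)    // p4..p7 free; p0..p3 in use
  val reqs     = Seq(true, true)          // two allocation requests fire this cycle
  val sels     = selectFirstN(freeList, reqs.size)
  val selMask  = (sels zip reqs).collect { case (s, true) => s }.foldLeft(BigInt(0))(_ | _)
  val deallocMask = BigInt(1) << 2        // ROB returns p2 this cycle

  // Same shape as: free_list := (free_list & ~sel_mask | dealloc_mask) & ~(1.U(numPregs.W))
  freeList = (freeList & ~selMask | deallocMask) & ~BigInt(1)
  println(sels.map(s => s.toString(2)))   // one-hot picks: p4 then p5
  println(freeList.toString(2))           // 11000100: p2, p6, p7 now free
}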
module RenameFreeList_2( // @[rename-freelist.scala:20:7] input clock, // @[rename-freelist.scala:20:7] input reset, // @[rename-freelist.scala:20:7] input io_reqs_0, // @[rename-freelist.scala:29:14] input io_reqs_1, // @[rename-freelist.scala:29:14] input io_reqs_2, // @[rename-freelist.scala:29:14] output io_alloc_pregs_0_valid, // @[rename-freelist.scala:29:14] output [6:0] io_alloc_pregs_0_bits, // @[rename-freelist.scala:29:14] output io_alloc_pregs_1_valid, // @[rename-freelist.scala:29:14] output [6:0] io_alloc_pregs_1_bits, // @[rename-freelist.scala:29:14] output io_alloc_pregs_2_valid, // @[rename-freelist.scala:29:14] output [6:0] io_alloc_pregs_2_bits, // @[rename-freelist.scala:29:14] input io_dealloc_pregs_0_valid, // @[rename-freelist.scala:29:14] input [6:0] io_dealloc_pregs_0_bits, // @[rename-freelist.scala:29:14] input io_dealloc_pregs_1_valid, // @[rename-freelist.scala:29:14] input [6:0] io_dealloc_pregs_1_bits, // @[rename-freelist.scala:29:14] input io_dealloc_pregs_2_valid, // @[rename-freelist.scala:29:14] input [6:0] io_dealloc_pregs_2_bits, // @[rename-freelist.scala:29:14] input io_ren_br_tags_0_valid, // @[rename-freelist.scala:29:14] input [3:0] io_ren_br_tags_0_bits, // @[rename-freelist.scala:29:14] input io_ren_br_tags_1_valid, // @[rename-freelist.scala:29:14] input [3:0] io_ren_br_tags_1_bits, // @[rename-freelist.scala:29:14] input io_ren_br_tags_2_valid, // @[rename-freelist.scala:29:14] input [3:0] io_ren_br_tags_2_bits, // @[rename-freelist.scala:29:14] input [15:0] io_brupdate_b1_resolve_mask, // @[rename-freelist.scala:29:14] input [15:0] io_brupdate_b1_mispredict_mask, // @[rename-freelist.scala:29:14] input [6:0] io_brupdate_b2_uop_uopc, // @[rename-freelist.scala:29:14] input [31:0] io_brupdate_b2_uop_inst, // @[rename-freelist.scala:29:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_rvc, // @[rename-freelist.scala:29:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[rename-freelist.scala:29:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[rename-freelist.scala:29:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[rename-freelist.scala:29:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[rename-freelist.scala:29:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[rename-freelist.scala:29:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[rename-freelist.scala:29:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[rename-freelist.scala:29:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_ctrl_is_load, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_ctrl_is_std, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_br, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_jalr, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_jal, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_sfb, // @[rename-freelist.scala:29:14] input [15:0] io_brupdate_b2_uop_br_mask, // @[rename-freelist.scala:29:14] input [3:0] io_brupdate_b2_uop_br_tag, 
// @[rename-freelist.scala:29:14] input [4:0] io_brupdate_b2_uop_ftq_idx, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_edge_inst, // @[rename-freelist.scala:29:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_taken, // @[rename-freelist.scala:29:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[rename-freelist.scala:29:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[rename-freelist.scala:29:14] input [6:0] io_brupdate_b2_uop_rob_idx, // @[rename-freelist.scala:29:14] input [4:0] io_brupdate_b2_uop_ldq_idx, // @[rename-freelist.scala:29:14] input [4:0] io_brupdate_b2_uop_stq_idx, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[rename-freelist.scala:29:14] input [6:0] io_brupdate_b2_uop_pdst, // @[rename-freelist.scala:29:14] input [6:0] io_brupdate_b2_uop_prs1, // @[rename-freelist.scala:29:14] input [6:0] io_brupdate_b2_uop_prs2, // @[rename-freelist.scala:29:14] input [6:0] io_brupdate_b2_uop_prs3, // @[rename-freelist.scala:29:14] input [4:0] io_brupdate_b2_uop_ppred, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_prs1_busy, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_prs2_busy, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_prs3_busy, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_ppred_busy, // @[rename-freelist.scala:29:14] input [6:0] io_brupdate_b2_uop_stale_pdst, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_exception, // @[rename-freelist.scala:29:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_bypassable, // @[rename-freelist.scala:29:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_mem_signed, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_fence, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_fencei, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_amo, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_uses_ldq, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_uses_stq, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_is_unique, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_flush_on_commit, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[rename-freelist.scala:29:14] input [5:0] io_brupdate_b2_uop_ldst, // @[rename-freelist.scala:29:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[rename-freelist.scala:29:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[rename-freelist.scala:29:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_ldst_val, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_frs3_en, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_fp_val, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_fp_single, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_xcpt_ma_if, // 
@[rename-freelist.scala:29:14] input io_brupdate_b2_uop_bp_debug_if, // @[rename-freelist.scala:29:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[rename-freelist.scala:29:14] input io_brupdate_b2_valid, // @[rename-freelist.scala:29:14] input io_brupdate_b2_mispredict, // @[rename-freelist.scala:29:14] input io_brupdate_b2_taken, // @[rename-freelist.scala:29:14] input [2:0] io_brupdate_b2_cfi_type, // @[rename-freelist.scala:29:14] input [1:0] io_brupdate_b2_pc_sel, // @[rename-freelist.scala:29:14] input [39:0] io_brupdate_b2_jalr_target, // @[rename-freelist.scala:29:14] input [20:0] io_brupdate_b2_target_offset, // @[rename-freelist.scala:29:14] input io_debug_pipeline_empty, // @[rename-freelist.scala:29:14] output [99:0] io_debug_freelist // @[rename-freelist.scala:29:14] ); wire io_reqs_0_0 = io_reqs_0; // @[rename-freelist.scala:20:7] wire io_reqs_1_0 = io_reqs_1; // @[rename-freelist.scala:20:7] wire io_reqs_2_0 = io_reqs_2; // @[rename-freelist.scala:20:7] wire io_dealloc_pregs_0_valid_0 = io_dealloc_pregs_0_valid; // @[rename-freelist.scala:20:7] wire [6:0] io_dealloc_pregs_0_bits_0 = io_dealloc_pregs_0_bits; // @[rename-freelist.scala:20:7] wire io_dealloc_pregs_1_valid_0 = io_dealloc_pregs_1_valid; // @[rename-freelist.scala:20:7] wire [6:0] io_dealloc_pregs_1_bits_0 = io_dealloc_pregs_1_bits; // @[rename-freelist.scala:20:7] wire io_dealloc_pregs_2_valid_0 = io_dealloc_pregs_2_valid; // @[rename-freelist.scala:20:7] wire [6:0] io_dealloc_pregs_2_bits_0 = io_dealloc_pregs_2_bits; // @[rename-freelist.scala:20:7] wire io_ren_br_tags_0_valid_0 = io_ren_br_tags_0_valid; // @[rename-freelist.scala:20:7] wire [3:0] io_ren_br_tags_0_bits_0 = io_ren_br_tags_0_bits; // @[rename-freelist.scala:20:7] wire io_ren_br_tags_1_valid_0 = io_ren_br_tags_1_valid; // @[rename-freelist.scala:20:7] wire [3:0] io_ren_br_tags_1_bits_0 = io_ren_br_tags_1_bits; // @[rename-freelist.scala:20:7] wire io_ren_br_tags_2_valid_0 = io_ren_br_tags_2_valid; // @[rename-freelist.scala:20:7] wire [3:0] io_ren_br_tags_2_bits_0 = io_ren_br_tags_2_bits; // @[rename-freelist.scala:20:7] wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[rename-freelist.scala:20:7] wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[rename-freelist.scala:20:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[rename-freelist.scala:20:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[rename-freelist.scala:20:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[rename-freelist.scala:20:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[rename-freelist.scala:20:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[rename-freelist.scala:20:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[rename-freelist.scala:20:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[rename-freelist.scala:20:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[rename-freelist.scala:20:7] wire [2:0] 
io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[rename-freelist.scala:20:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[rename-freelist.scala:20:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[rename-freelist.scala:20:7] wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[rename-freelist.scala:20:7] wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[rename-freelist.scala:20:7] wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[rename-freelist.scala:20:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[rename-freelist.scala:20:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[rename-freelist.scala:20:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[rename-freelist.scala:20:7] wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[rename-freelist.scala:20:7] wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[rename-freelist.scala:20:7] wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[rename-freelist.scala:20:7] wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[rename-freelist.scala:20:7] wire [6:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[rename-freelist.scala:20:7] wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[rename-freelist.scala:20:7] wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[rename-freelist.scala:20:7] wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // 
@[rename-freelist.scala:20:7] wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[rename-freelist.scala:20:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[rename-freelist.scala:20:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[rename-freelist.scala:20:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[rename-freelist.scala:20:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[rename-freelist.scala:20:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[rename-freelist.scala:20:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_valid_0 = 
io_brupdate_b2_valid; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[rename-freelist.scala:20:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[rename-freelist.scala:20:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[rename-freelist.scala:20:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[rename-freelist.scala:20:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[rename-freelist.scala:20:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[rename-freelist.scala:20:7] wire io_debug_pipeline_empty_0 = io_debug_pipeline_empty; // @[rename-freelist.scala:20:7] wire [99:0] io_debug_isprlist = 100'h0; // @[rename-freelist.scala:20:7] wire [99:0] _br_alloc_lists_0_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_1_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_2_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_3_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_4_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_5_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_6_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_7_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_8_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_9_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_10_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_11_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_12_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_13_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_14_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_15_T_5 = 100'h0; // @[Mux.scala:30:73] wire [99:0] _free_list_T = 100'hFFFFFFFFFFFFFFFFFFFFFFFFE; // @[rename-freelist.scala:50:45] wire [99:0] _free_list_T_4 = 100'hFFFFFFFFFFFFFFFFFFFFFFFFE; // @[rename-freelist.scala:76:57] wire _br_slots_WIRE_0 = io_ren_br_tags_0_valid_0; // @[rename-freelist.scala:20:7, :66:25] wire _br_slots_WIRE_1 = io_ren_br_tags_1_valid_0; // @[rename-freelist.scala:20:7, :66:25] wire _br_slots_WIRE_2 = io_ren_br_tags_2_valid_0; // @[rename-freelist.scala:20:7, :66:25] wire io_alloc_pregs_0_valid_0; // @[rename-freelist.scala:20:7] wire [6:0] io_alloc_pregs_0_bits_0; // @[rename-freelist.scala:20:7] wire io_alloc_pregs_1_valid_0; // @[rename-freelist.scala:20:7] wire [6:0] io_alloc_pregs_1_bits_0; // @[rename-freelist.scala:20:7] wire io_alloc_pregs_2_valid_0; // @[rename-freelist.scala:20:7] wire [6:0] io_alloc_pregs_2_bits_0; // @[rename-freelist.scala:20:7] wire [99:0] io_debug_freelist_0; // @[rename-freelist.scala:20:7] reg [99:0] free_list; // @[rename-freelist.scala:50:26] reg [99:0] br_alloc_lists_0; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_1; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_2; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_3; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_4; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_5; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_6; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_7; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_8; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_9; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_10; // @[rename-freelist.scala:51:27] reg [99:0] 
br_alloc_lists_11; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_12; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_13; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_14; // @[rename-freelist.scala:51:27] reg [99:0] br_alloc_lists_15; // @[rename-freelist.scala:51:27] wire [99:0] _sels_sels_0_T_199; // @[Mux.scala:50:70] wire [99:0] _sels_sels_1_T_199; // @[Mux.scala:50:70] wire [99:0] _sels_sels_2_T_199; // @[Mux.scala:50:70] wire [99:0] sels_0; // @[util.scala:405:20] wire [99:0] sels_1; // @[util.scala:405:20] wire [99:0] sels_2; // @[util.scala:405:20] wire _sels_sels_0_T = free_list[0]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_1 = free_list[1]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_2 = free_list[2]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_3 = free_list[3]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_4 = free_list[4]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_5 = free_list[5]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_6 = free_list[6]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_7 = free_list[7]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_8 = free_list[8]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_9 = free_list[9]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_10 = free_list[10]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_11 = free_list[11]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_12 = free_list[12]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_13 = free_list[13]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_14 = free_list[14]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_15 = free_list[15]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_16 = free_list[16]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_17 = free_list[17]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_18 = free_list[18]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_19 = free_list[19]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_20 = free_list[20]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_21 = free_list[21]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_22 = free_list[22]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_23 = free_list[23]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_24 = free_list[24]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_25 = free_list[25]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_26 = free_list[26]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_27 = free_list[27]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_28 = free_list[28]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_29 = free_list[29]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_30 = free_list[30]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_31 = free_list[31]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_32 = free_list[32]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_33 = free_list[33]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_34 = free_list[34]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_35 = free_list[35]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_36 = free_list[36]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_37 = free_list[37]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_38 = free_list[38]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_39 = free_list[39]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_40 = free_list[40]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_41 = free_list[41]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_42 = free_list[42]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_43 = free_list[43]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_44 = free_list[44]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_45 = free_list[45]; // 
@[OneHot.scala:85:71] wire _sels_sels_0_T_46 = free_list[46]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_47 = free_list[47]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_48 = free_list[48]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_49 = free_list[49]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_50 = free_list[50]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_51 = free_list[51]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_52 = free_list[52]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_53 = free_list[53]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_54 = free_list[54]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_55 = free_list[55]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_56 = free_list[56]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_57 = free_list[57]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_58 = free_list[58]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_59 = free_list[59]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_60 = free_list[60]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_61 = free_list[61]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_62 = free_list[62]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_63 = free_list[63]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_64 = free_list[64]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_65 = free_list[65]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_66 = free_list[66]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_67 = free_list[67]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_68 = free_list[68]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_69 = free_list[69]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_70 = free_list[70]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_71 = free_list[71]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_72 = free_list[72]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_73 = free_list[73]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_74 = free_list[74]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_75 = free_list[75]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_76 = free_list[76]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_77 = free_list[77]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_78 = free_list[78]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_79 = free_list[79]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_80 = free_list[80]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_81 = free_list[81]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_82 = free_list[82]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_83 = free_list[83]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_84 = free_list[84]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_85 = free_list[85]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_86 = free_list[86]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_87 = free_list[87]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_88 = free_list[88]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_89 = free_list[89]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_90 = free_list[90]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_91 = free_list[91]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_92 = free_list[92]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_93 = free_list[93]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_94 = free_list[94]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_95 = free_list[95]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_96 = free_list[96]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_97 = free_list[97]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_98 = free_list[98]; // @[OneHot.scala:85:71] wire _sels_sels_0_T_99 = free_list[99]; // @[OneHot.scala:85:71] wire [99:0] 
_sels_sels_0_T_100 = {_sels_sels_0_T_99, 99'h0}; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_101 = _sels_sels_0_T_98 ? 100'h4000000000000000000000000 : _sels_sels_0_T_100; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_102 = _sels_sels_0_T_97 ? 100'h2000000000000000000000000 : _sels_sels_0_T_101; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_103 = _sels_sels_0_T_96 ? 100'h1000000000000000000000000 : _sels_sels_0_T_102; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_104 = _sels_sels_0_T_95 ? 100'h800000000000000000000000 : _sels_sels_0_T_103; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_105 = _sels_sels_0_T_94 ? 100'h400000000000000000000000 : _sels_sels_0_T_104; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_106 = _sels_sels_0_T_93 ? 100'h200000000000000000000000 : _sels_sels_0_T_105; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_107 = _sels_sels_0_T_92 ? 100'h100000000000000000000000 : _sels_sels_0_T_106; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_108 = _sels_sels_0_T_91 ? 100'h80000000000000000000000 : _sels_sels_0_T_107; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_109 = _sels_sels_0_T_90 ? 100'h40000000000000000000000 : _sels_sels_0_T_108; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_110 = _sels_sels_0_T_89 ? 100'h20000000000000000000000 : _sels_sels_0_T_109; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_111 = _sels_sels_0_T_88 ? 100'h10000000000000000000000 : _sels_sels_0_T_110; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_112 = _sels_sels_0_T_87 ? 100'h8000000000000000000000 : _sels_sels_0_T_111; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_113 = _sels_sels_0_T_86 ? 100'h4000000000000000000000 : _sels_sels_0_T_112; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_114 = _sels_sels_0_T_85 ? 100'h2000000000000000000000 : _sels_sels_0_T_113; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_115 = _sels_sels_0_T_84 ? 100'h1000000000000000000000 : _sels_sels_0_T_114; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_116 = _sels_sels_0_T_83 ? 100'h800000000000000000000 : _sels_sels_0_T_115; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_117 = _sels_sels_0_T_82 ? 100'h400000000000000000000 : _sels_sels_0_T_116; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_118 = _sels_sels_0_T_81 ? 100'h200000000000000000000 : _sels_sels_0_T_117; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_119 = _sels_sels_0_T_80 ? 100'h100000000000000000000 : _sels_sels_0_T_118; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_120 = _sels_sels_0_T_79 ? 100'h80000000000000000000 : _sels_sels_0_T_119; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_121 = _sels_sels_0_T_78 ? 100'h40000000000000000000 : _sels_sels_0_T_120; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_122 = _sels_sels_0_T_77 ? 100'h20000000000000000000 : _sels_sels_0_T_121; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_123 = _sels_sels_0_T_76 ? 100'h10000000000000000000 : _sels_sels_0_T_122; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_124 = _sels_sels_0_T_75 ? 100'h8000000000000000000 : _sels_sels_0_T_123; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_125 = _sels_sels_0_T_74 ? 100'h4000000000000000000 : _sels_sels_0_T_124; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_126 = _sels_sels_0_T_73 ? 100'h2000000000000000000 : _sels_sels_0_T_125; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_127 = _sels_sels_0_T_72 ? 
100'h1000000000000000000 : _sels_sels_0_T_126; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_128 = _sels_sels_0_T_71 ? 100'h800000000000000000 : _sels_sels_0_T_127; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_129 = _sels_sels_0_T_70 ? 100'h400000000000000000 : _sels_sels_0_T_128; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_130 = _sels_sels_0_T_69 ? 100'h200000000000000000 : _sels_sels_0_T_129; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_131 = _sels_sels_0_T_68 ? 100'h100000000000000000 : _sels_sels_0_T_130; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_132 = _sels_sels_0_T_67 ? 100'h80000000000000000 : _sels_sels_0_T_131; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_133 = _sels_sels_0_T_66 ? 100'h40000000000000000 : _sels_sels_0_T_132; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_134 = _sels_sels_0_T_65 ? 100'h20000000000000000 : _sels_sels_0_T_133; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_135 = _sels_sels_0_T_64 ? 100'h10000000000000000 : _sels_sels_0_T_134; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_136 = _sels_sels_0_T_63 ? 100'h8000000000000000 : _sels_sels_0_T_135; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_137 = _sels_sels_0_T_62 ? 100'h4000000000000000 : _sels_sels_0_T_136; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_138 = _sels_sels_0_T_61 ? 100'h2000000000000000 : _sels_sels_0_T_137; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_139 = _sels_sels_0_T_60 ? 100'h1000000000000000 : _sels_sels_0_T_138; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_140 = _sels_sels_0_T_59 ? 100'h800000000000000 : _sels_sels_0_T_139; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_141 = _sels_sels_0_T_58 ? 100'h400000000000000 : _sels_sels_0_T_140; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_142 = _sels_sels_0_T_57 ? 100'h200000000000000 : _sels_sels_0_T_141; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_143 = _sels_sels_0_T_56 ? 100'h100000000000000 : _sels_sels_0_T_142; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_144 = _sels_sels_0_T_55 ? 100'h80000000000000 : _sels_sels_0_T_143; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_145 = _sels_sels_0_T_54 ? 100'h40000000000000 : _sels_sels_0_T_144; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_146 = _sels_sels_0_T_53 ? 100'h20000000000000 : _sels_sels_0_T_145; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_147 = _sels_sels_0_T_52 ? 100'h10000000000000 : _sels_sels_0_T_146; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_148 = _sels_sels_0_T_51 ? 100'h8000000000000 : _sels_sels_0_T_147; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_149 = _sels_sels_0_T_50 ? 100'h4000000000000 : _sels_sels_0_T_148; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_150 = _sels_sels_0_T_49 ? 100'h2000000000000 : _sels_sels_0_T_149; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_151 = _sels_sels_0_T_48 ? 100'h1000000000000 : _sels_sels_0_T_150; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_152 = _sels_sels_0_T_47 ? 100'h800000000000 : _sels_sels_0_T_151; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_153 = _sels_sels_0_T_46 ? 100'h400000000000 : _sels_sels_0_T_152; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_154 = _sels_sels_0_T_45 ? 100'h200000000000 : _sels_sels_0_T_153; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_155 = _sels_sels_0_T_44 ? 100'h100000000000 : _sels_sels_0_T_154; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_156 = _sels_sels_0_T_43 ? 
100'h80000000000 : _sels_sels_0_T_155; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_157 = _sels_sels_0_T_42 ? 100'h40000000000 : _sels_sels_0_T_156; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_158 = _sels_sels_0_T_41 ? 100'h20000000000 : _sels_sels_0_T_157; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_159 = _sels_sels_0_T_40 ? 100'h10000000000 : _sels_sels_0_T_158; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_160 = _sels_sels_0_T_39 ? 100'h8000000000 : _sels_sels_0_T_159; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_161 = _sels_sels_0_T_38 ? 100'h4000000000 : _sels_sels_0_T_160; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_162 = _sels_sels_0_T_37 ? 100'h2000000000 : _sels_sels_0_T_161; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_163 = _sels_sels_0_T_36 ? 100'h1000000000 : _sels_sels_0_T_162; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_164 = _sels_sels_0_T_35 ? 100'h800000000 : _sels_sels_0_T_163; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_165 = _sels_sels_0_T_34 ? 100'h400000000 : _sels_sels_0_T_164; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_166 = _sels_sels_0_T_33 ? 100'h200000000 : _sels_sels_0_T_165; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_167 = _sels_sels_0_T_32 ? 100'h100000000 : _sels_sels_0_T_166; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_168 = _sels_sels_0_T_31 ? 100'h80000000 : _sels_sels_0_T_167; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_169 = _sels_sels_0_T_30 ? 100'h40000000 : _sels_sels_0_T_168; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_170 = _sels_sels_0_T_29 ? 100'h20000000 : _sels_sels_0_T_169; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_171 = _sels_sels_0_T_28 ? 100'h10000000 : _sels_sels_0_T_170; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_172 = _sels_sels_0_T_27 ? 100'h8000000 : _sels_sels_0_T_171; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_173 = _sels_sels_0_T_26 ? 100'h4000000 : _sels_sels_0_T_172; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_174 = _sels_sels_0_T_25 ? 100'h2000000 : _sels_sels_0_T_173; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_175 = _sels_sels_0_T_24 ? 100'h1000000 : _sels_sels_0_T_174; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_176 = _sels_sels_0_T_23 ? 100'h800000 : _sels_sels_0_T_175; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_177 = _sels_sels_0_T_22 ? 100'h400000 : _sels_sels_0_T_176; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_178 = _sels_sels_0_T_21 ? 100'h200000 : _sels_sels_0_T_177; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_179 = _sels_sels_0_T_20 ? 100'h100000 : _sels_sels_0_T_178; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_180 = _sels_sels_0_T_19 ? 100'h80000 : _sels_sels_0_T_179; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_181 = _sels_sels_0_T_18 ? 100'h40000 : _sels_sels_0_T_180; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_182 = _sels_sels_0_T_17 ? 100'h20000 : _sels_sels_0_T_181; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_183 = _sels_sels_0_T_16 ? 100'h10000 : _sels_sels_0_T_182; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_184 = _sels_sels_0_T_15 ? 100'h8000 : _sels_sels_0_T_183; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_185 = _sels_sels_0_T_14 ? 100'h4000 : _sels_sels_0_T_184; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_186 = _sels_sels_0_T_13 ? 100'h2000 : _sels_sels_0_T_185; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_187 = _sels_sels_0_T_12 ? 
100'h1000 : _sels_sels_0_T_186; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_188 = _sels_sels_0_T_11 ? 100'h800 : _sels_sels_0_T_187; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_189 = _sels_sels_0_T_10 ? 100'h400 : _sels_sels_0_T_188; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_190 = _sels_sels_0_T_9 ? 100'h200 : _sels_sels_0_T_189; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_191 = _sels_sels_0_T_8 ? 100'h100 : _sels_sels_0_T_190; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_192 = _sels_sels_0_T_7 ? 100'h80 : _sels_sels_0_T_191; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_193 = _sels_sels_0_T_6 ? 100'h40 : _sels_sels_0_T_192; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_194 = _sels_sels_0_T_5 ? 100'h20 : _sels_sels_0_T_193; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_195 = _sels_sels_0_T_4 ? 100'h10 : _sels_sels_0_T_194; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_196 = _sels_sels_0_T_3 ? 100'h8 : _sels_sels_0_T_195; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_197 = _sels_sels_0_T_2 ? 100'h4 : _sels_sels_0_T_196; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_0_T_198 = _sels_sels_0_T_1 ? 100'h2 : _sels_sels_0_T_197; // @[OneHot.scala:85:71] assign _sels_sels_0_T_199 = _sels_sels_0_T ? 100'h1 : _sels_sels_0_T_198; // @[OneHot.scala:85:71] assign sels_0 = _sels_sels_0_T_199; // @[Mux.scala:50:70] wire [99:0] _sels_T = ~sels_0; // @[util.scala:405:20, :410:21] wire [99:0] _sels_T_1 = free_list & _sels_T; // @[util.scala:410:{19,21}] wire _sels_sels_1_T = _sels_T_1[0]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_1 = _sels_T_1[1]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_2 = _sels_T_1[2]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_3 = _sels_T_1[3]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_4 = _sels_T_1[4]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_5 = _sels_T_1[5]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_6 = _sels_T_1[6]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_7 = _sels_T_1[7]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_8 = _sels_T_1[8]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_9 = _sels_T_1[9]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_10 = _sels_T_1[10]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_11 = _sels_T_1[11]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_12 = _sels_T_1[12]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_13 = _sels_T_1[13]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_14 = _sels_T_1[14]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_15 = _sels_T_1[15]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_16 = _sels_T_1[16]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_17 = _sels_T_1[17]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_18 = _sels_T_1[18]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_19 = _sels_T_1[19]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_20 = _sels_T_1[20]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_21 = _sels_T_1[21]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_22 = _sels_T_1[22]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_23 = _sels_T_1[23]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_24 = _sels_T_1[24]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_25 = _sels_T_1[25]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_26 = _sels_T_1[26]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_27 = _sels_T_1[27]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_28 = _sels_T_1[28]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_29 = _sels_T_1[29]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_30 = _sels_T_1[30]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_31 = 
_sels_T_1[31]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_32 = _sels_T_1[32]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_33 = _sels_T_1[33]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_34 = _sels_T_1[34]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_35 = _sels_T_1[35]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_36 = _sels_T_1[36]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_37 = _sels_T_1[37]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_38 = _sels_T_1[38]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_39 = _sels_T_1[39]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_40 = _sels_T_1[40]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_41 = _sels_T_1[41]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_42 = _sels_T_1[42]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_43 = _sels_T_1[43]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_44 = _sels_T_1[44]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_45 = _sels_T_1[45]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_46 = _sels_T_1[46]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_47 = _sels_T_1[47]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_48 = _sels_T_1[48]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_49 = _sels_T_1[49]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_50 = _sels_T_1[50]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_51 = _sels_T_1[51]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_52 = _sels_T_1[52]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_53 = _sels_T_1[53]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_54 = _sels_T_1[54]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_55 = _sels_T_1[55]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_56 = _sels_T_1[56]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_57 = _sels_T_1[57]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_58 = _sels_T_1[58]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_59 = _sels_T_1[59]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_60 = _sels_T_1[60]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_61 = _sels_T_1[61]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_62 = _sels_T_1[62]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_63 = _sels_T_1[63]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_64 = _sels_T_1[64]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_65 = _sels_T_1[65]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_66 = _sels_T_1[66]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_67 = _sels_T_1[67]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_68 = _sels_T_1[68]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_69 = _sels_T_1[69]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_70 = _sels_T_1[70]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_71 = _sels_T_1[71]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_72 = _sels_T_1[72]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_73 = _sels_T_1[73]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_74 = _sels_T_1[74]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_75 = _sels_T_1[75]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_76 = _sels_T_1[76]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_77 = _sels_T_1[77]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_78 = _sels_T_1[78]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_79 = _sels_T_1[79]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_80 = _sels_T_1[80]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_81 = _sels_T_1[81]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_82 = _sels_T_1[82]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_83 = _sels_T_1[83]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_84 = _sels_T_1[84]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_85 = _sels_T_1[85]; // @[OneHot.scala:85:71] wire 
_sels_sels_1_T_86 = _sels_T_1[86]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_87 = _sels_T_1[87]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_88 = _sels_T_1[88]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_89 = _sels_T_1[89]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_90 = _sels_T_1[90]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_91 = _sels_T_1[91]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_92 = _sels_T_1[92]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_93 = _sels_T_1[93]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_94 = _sels_T_1[94]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_95 = _sels_T_1[95]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_96 = _sels_T_1[96]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_97 = _sels_T_1[97]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_98 = _sels_T_1[98]; // @[OneHot.scala:85:71] wire _sels_sels_1_T_99 = _sels_T_1[99]; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_100 = {_sels_sels_1_T_99, 99'h0}; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_101 = _sels_sels_1_T_98 ? 100'h4000000000000000000000000 : _sels_sels_1_T_100; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_102 = _sels_sels_1_T_97 ? 100'h2000000000000000000000000 : _sels_sels_1_T_101; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_103 = _sels_sels_1_T_96 ? 100'h1000000000000000000000000 : _sels_sels_1_T_102; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_104 = _sels_sels_1_T_95 ? 100'h800000000000000000000000 : _sels_sels_1_T_103; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_105 = _sels_sels_1_T_94 ? 100'h400000000000000000000000 : _sels_sels_1_T_104; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_106 = _sels_sels_1_T_93 ? 100'h200000000000000000000000 : _sels_sels_1_T_105; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_107 = _sels_sels_1_T_92 ? 100'h100000000000000000000000 : _sels_sels_1_T_106; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_108 = _sels_sels_1_T_91 ? 100'h80000000000000000000000 : _sels_sels_1_T_107; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_109 = _sels_sels_1_T_90 ? 100'h40000000000000000000000 : _sels_sels_1_T_108; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_110 = _sels_sels_1_T_89 ? 100'h20000000000000000000000 : _sels_sels_1_T_109; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_111 = _sels_sels_1_T_88 ? 100'h10000000000000000000000 : _sels_sels_1_T_110; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_112 = _sels_sels_1_T_87 ? 100'h8000000000000000000000 : _sels_sels_1_T_111; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_113 = _sels_sels_1_T_86 ? 100'h4000000000000000000000 : _sels_sels_1_T_112; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_114 = _sels_sels_1_T_85 ? 100'h2000000000000000000000 : _sels_sels_1_T_113; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_115 = _sels_sels_1_T_84 ? 100'h1000000000000000000000 : _sels_sels_1_T_114; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_116 = _sels_sels_1_T_83 ? 100'h800000000000000000000 : _sels_sels_1_T_115; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_117 = _sels_sels_1_T_82 ? 100'h400000000000000000000 : _sels_sels_1_T_116; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_118 = _sels_sels_1_T_81 ? 100'h200000000000000000000 : _sels_sels_1_T_117; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_119 = _sels_sels_1_T_80 ? 100'h100000000000000000000 : _sels_sels_1_T_118; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_120 = _sels_sels_1_T_79 ? 
100'h80000000000000000000 : _sels_sels_1_T_119; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_121 = _sels_sels_1_T_78 ? 100'h40000000000000000000 : _sels_sels_1_T_120; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_122 = _sels_sels_1_T_77 ? 100'h20000000000000000000 : _sels_sels_1_T_121; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_123 = _sels_sels_1_T_76 ? 100'h10000000000000000000 : _sels_sels_1_T_122; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_124 = _sels_sels_1_T_75 ? 100'h8000000000000000000 : _sels_sels_1_T_123; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_125 = _sels_sels_1_T_74 ? 100'h4000000000000000000 : _sels_sels_1_T_124; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_126 = _sels_sels_1_T_73 ? 100'h2000000000000000000 : _sels_sels_1_T_125; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_127 = _sels_sels_1_T_72 ? 100'h1000000000000000000 : _sels_sels_1_T_126; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_128 = _sels_sels_1_T_71 ? 100'h800000000000000000 : _sels_sels_1_T_127; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_129 = _sels_sels_1_T_70 ? 100'h400000000000000000 : _sels_sels_1_T_128; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_130 = _sels_sels_1_T_69 ? 100'h200000000000000000 : _sels_sels_1_T_129; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_131 = _sels_sels_1_T_68 ? 100'h100000000000000000 : _sels_sels_1_T_130; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_132 = _sels_sels_1_T_67 ? 100'h80000000000000000 : _sels_sels_1_T_131; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_133 = _sels_sels_1_T_66 ? 100'h40000000000000000 : _sels_sels_1_T_132; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_134 = _sels_sels_1_T_65 ? 100'h20000000000000000 : _sels_sels_1_T_133; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_135 = _sels_sels_1_T_64 ? 100'h10000000000000000 : _sels_sels_1_T_134; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_136 = _sels_sels_1_T_63 ? 100'h8000000000000000 : _sels_sels_1_T_135; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_137 = _sels_sels_1_T_62 ? 100'h4000000000000000 : _sels_sels_1_T_136; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_138 = _sels_sels_1_T_61 ? 100'h2000000000000000 : _sels_sels_1_T_137; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_139 = _sels_sels_1_T_60 ? 100'h1000000000000000 : _sels_sels_1_T_138; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_140 = _sels_sels_1_T_59 ? 100'h800000000000000 : _sels_sels_1_T_139; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_141 = _sels_sels_1_T_58 ? 100'h400000000000000 : _sels_sels_1_T_140; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_142 = _sels_sels_1_T_57 ? 100'h200000000000000 : _sels_sels_1_T_141; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_143 = _sels_sels_1_T_56 ? 100'h100000000000000 : _sels_sels_1_T_142; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_144 = _sels_sels_1_T_55 ? 100'h80000000000000 : _sels_sels_1_T_143; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_145 = _sels_sels_1_T_54 ? 100'h40000000000000 : _sels_sels_1_T_144; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_146 = _sels_sels_1_T_53 ? 100'h20000000000000 : _sels_sels_1_T_145; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_147 = _sels_sels_1_T_52 ? 100'h10000000000000 : _sels_sels_1_T_146; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_148 = _sels_sels_1_T_51 ? 
100'h8000000000000 : _sels_sels_1_T_147; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_149 = _sels_sels_1_T_50 ? 100'h4000000000000 : _sels_sels_1_T_148; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_150 = _sels_sels_1_T_49 ? 100'h2000000000000 : _sels_sels_1_T_149; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_151 = _sels_sels_1_T_48 ? 100'h1000000000000 : _sels_sels_1_T_150; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_152 = _sels_sels_1_T_47 ? 100'h800000000000 : _sels_sels_1_T_151; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_153 = _sels_sels_1_T_46 ? 100'h400000000000 : _sels_sels_1_T_152; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_154 = _sels_sels_1_T_45 ? 100'h200000000000 : _sels_sels_1_T_153; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_155 = _sels_sels_1_T_44 ? 100'h100000000000 : _sels_sels_1_T_154; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_156 = _sels_sels_1_T_43 ? 100'h80000000000 : _sels_sels_1_T_155; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_157 = _sels_sels_1_T_42 ? 100'h40000000000 : _sels_sels_1_T_156; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_158 = _sels_sels_1_T_41 ? 100'h20000000000 : _sels_sels_1_T_157; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_159 = _sels_sels_1_T_40 ? 100'h10000000000 : _sels_sels_1_T_158; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_160 = _sels_sels_1_T_39 ? 100'h8000000000 : _sels_sels_1_T_159; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_161 = _sels_sels_1_T_38 ? 100'h4000000000 : _sels_sels_1_T_160; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_162 = _sels_sels_1_T_37 ? 100'h2000000000 : _sels_sels_1_T_161; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_163 = _sels_sels_1_T_36 ? 100'h1000000000 : _sels_sels_1_T_162; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_164 = _sels_sels_1_T_35 ? 100'h800000000 : _sels_sels_1_T_163; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_165 = _sels_sels_1_T_34 ? 100'h400000000 : _sels_sels_1_T_164; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_166 = _sels_sels_1_T_33 ? 100'h200000000 : _sels_sels_1_T_165; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_167 = _sels_sels_1_T_32 ? 100'h100000000 : _sels_sels_1_T_166; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_168 = _sels_sels_1_T_31 ? 100'h80000000 : _sels_sels_1_T_167; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_169 = _sels_sels_1_T_30 ? 100'h40000000 : _sels_sels_1_T_168; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_170 = _sels_sels_1_T_29 ? 100'h20000000 : _sels_sels_1_T_169; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_171 = _sels_sels_1_T_28 ? 100'h10000000 : _sels_sels_1_T_170; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_172 = _sels_sels_1_T_27 ? 100'h8000000 : _sels_sels_1_T_171; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_173 = _sels_sels_1_T_26 ? 100'h4000000 : _sels_sels_1_T_172; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_174 = _sels_sels_1_T_25 ? 100'h2000000 : _sels_sels_1_T_173; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_175 = _sels_sels_1_T_24 ? 100'h1000000 : _sels_sels_1_T_174; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_176 = _sels_sels_1_T_23 ? 100'h800000 : _sels_sels_1_T_175; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_177 = _sels_sels_1_T_22 ? 100'h400000 : _sels_sels_1_T_176; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_178 = _sels_sels_1_T_21 ? 
100'h200000 : _sels_sels_1_T_177; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_179 = _sels_sels_1_T_20 ? 100'h100000 : _sels_sels_1_T_178; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_180 = _sels_sels_1_T_19 ? 100'h80000 : _sels_sels_1_T_179; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_181 = _sels_sels_1_T_18 ? 100'h40000 : _sels_sels_1_T_180; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_182 = _sels_sels_1_T_17 ? 100'h20000 : _sels_sels_1_T_181; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_183 = _sels_sels_1_T_16 ? 100'h10000 : _sels_sels_1_T_182; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_184 = _sels_sels_1_T_15 ? 100'h8000 : _sels_sels_1_T_183; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_185 = _sels_sels_1_T_14 ? 100'h4000 : _sels_sels_1_T_184; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_186 = _sels_sels_1_T_13 ? 100'h2000 : _sels_sels_1_T_185; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_187 = _sels_sels_1_T_12 ? 100'h1000 : _sels_sels_1_T_186; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_188 = _sels_sels_1_T_11 ? 100'h800 : _sels_sels_1_T_187; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_189 = _sels_sels_1_T_10 ? 100'h400 : _sels_sels_1_T_188; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_190 = _sels_sels_1_T_9 ? 100'h200 : _sels_sels_1_T_189; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_191 = _sels_sels_1_T_8 ? 100'h100 : _sels_sels_1_T_190; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_192 = _sels_sels_1_T_7 ? 100'h80 : _sels_sels_1_T_191; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_193 = _sels_sels_1_T_6 ? 100'h40 : _sels_sels_1_T_192; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_194 = _sels_sels_1_T_5 ? 100'h20 : _sels_sels_1_T_193; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_195 = _sels_sels_1_T_4 ? 100'h10 : _sels_sels_1_T_194; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_196 = _sels_sels_1_T_3 ? 100'h8 : _sels_sels_1_T_195; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_197 = _sels_sels_1_T_2 ? 100'h4 : _sels_sels_1_T_196; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_1_T_198 = _sels_sels_1_T_1 ? 100'h2 : _sels_sels_1_T_197; // @[OneHot.scala:85:71] assign _sels_sels_1_T_199 = _sels_sels_1_T ? 
100'h1 : _sels_sels_1_T_198; // @[OneHot.scala:85:71] assign sels_1 = _sels_sels_1_T_199; // @[Mux.scala:50:70] wire [99:0] _sels_T_2 = ~sels_1; // @[util.scala:405:20, :410:21] wire [99:0] _sels_T_3 = _sels_T_1 & _sels_T_2; // @[util.scala:410:{19,21}] wire _sels_sels_2_T = _sels_T_3[0]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_1 = _sels_T_3[1]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_2 = _sels_T_3[2]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_3 = _sels_T_3[3]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_4 = _sels_T_3[4]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_5 = _sels_T_3[5]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_6 = _sels_T_3[6]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_7 = _sels_T_3[7]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_8 = _sels_T_3[8]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_9 = _sels_T_3[9]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_10 = _sels_T_3[10]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_11 = _sels_T_3[11]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_12 = _sels_T_3[12]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_13 = _sels_T_3[13]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_14 = _sels_T_3[14]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_15 = _sels_T_3[15]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_16 = _sels_T_3[16]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_17 = _sels_T_3[17]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_18 = _sels_T_3[18]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_19 = _sels_T_3[19]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_20 = _sels_T_3[20]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_21 = _sels_T_3[21]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_22 = _sels_T_3[22]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_23 = _sels_T_3[23]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_24 = _sels_T_3[24]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_25 = _sels_T_3[25]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_26 = _sels_T_3[26]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_27 = _sels_T_3[27]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_28 = _sels_T_3[28]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_29 = _sels_T_3[29]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_30 = _sels_T_3[30]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_31 = _sels_T_3[31]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_32 = _sels_T_3[32]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_33 = _sels_T_3[33]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_34 = _sels_T_3[34]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_35 = _sels_T_3[35]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_36 = _sels_T_3[36]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_37 = _sels_T_3[37]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_38 = _sels_T_3[38]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_39 = _sels_T_3[39]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_40 = _sels_T_3[40]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_41 = _sels_T_3[41]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_42 = _sels_T_3[42]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_43 = _sels_T_3[43]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_44 = _sels_T_3[44]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_45 = _sels_T_3[45]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_46 = _sels_T_3[46]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_47 = _sels_T_3[47]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_48 = _sels_T_3[48]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_49 = _sels_T_3[49]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_50 = _sels_T_3[50]; // @[OneHot.scala:85:71] wire 
_sels_sels_2_T_51 = _sels_T_3[51]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_52 = _sels_T_3[52]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_53 = _sels_T_3[53]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_54 = _sels_T_3[54]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_55 = _sels_T_3[55]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_56 = _sels_T_3[56]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_57 = _sels_T_3[57]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_58 = _sels_T_3[58]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_59 = _sels_T_3[59]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_60 = _sels_T_3[60]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_61 = _sels_T_3[61]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_62 = _sels_T_3[62]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_63 = _sels_T_3[63]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_64 = _sels_T_3[64]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_65 = _sels_T_3[65]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_66 = _sels_T_3[66]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_67 = _sels_T_3[67]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_68 = _sels_T_3[68]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_69 = _sels_T_3[69]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_70 = _sels_T_3[70]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_71 = _sels_T_3[71]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_72 = _sels_T_3[72]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_73 = _sels_T_3[73]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_74 = _sels_T_3[74]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_75 = _sels_T_3[75]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_76 = _sels_T_3[76]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_77 = _sels_T_3[77]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_78 = _sels_T_3[78]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_79 = _sels_T_3[79]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_80 = _sels_T_3[80]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_81 = _sels_T_3[81]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_82 = _sels_T_3[82]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_83 = _sels_T_3[83]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_84 = _sels_T_3[84]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_85 = _sels_T_3[85]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_86 = _sels_T_3[86]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_87 = _sels_T_3[87]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_88 = _sels_T_3[88]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_89 = _sels_T_3[89]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_90 = _sels_T_3[90]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_91 = _sels_T_3[91]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_92 = _sels_T_3[92]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_93 = _sels_T_3[93]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_94 = _sels_T_3[94]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_95 = _sels_T_3[95]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_96 = _sels_T_3[96]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_97 = _sels_T_3[97]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_98 = _sels_T_3[98]; // @[OneHot.scala:85:71] wire _sels_sels_2_T_99 = _sels_T_3[99]; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_100 = {_sels_sels_2_T_99, 99'h0}; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_101 = _sels_sels_2_T_98 ? 100'h4000000000000000000000000 : _sels_sels_2_T_100; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_102 = _sels_sels_2_T_97 ? 
100'h2000000000000000000000000 : _sels_sels_2_T_101; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_103 = _sels_sels_2_T_96 ? 100'h1000000000000000000000000 : _sels_sels_2_T_102; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_104 = _sels_sels_2_T_95 ? 100'h800000000000000000000000 : _sels_sels_2_T_103; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_105 = _sels_sels_2_T_94 ? 100'h400000000000000000000000 : _sels_sels_2_T_104; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_106 = _sels_sels_2_T_93 ? 100'h200000000000000000000000 : _sels_sels_2_T_105; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_107 = _sels_sels_2_T_92 ? 100'h100000000000000000000000 : _sels_sels_2_T_106; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_108 = _sels_sels_2_T_91 ? 100'h80000000000000000000000 : _sels_sels_2_T_107; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_109 = _sels_sels_2_T_90 ? 100'h40000000000000000000000 : _sels_sels_2_T_108; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_110 = _sels_sels_2_T_89 ? 100'h20000000000000000000000 : _sels_sels_2_T_109; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_111 = _sels_sels_2_T_88 ? 100'h10000000000000000000000 : _sels_sels_2_T_110; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_112 = _sels_sels_2_T_87 ? 100'h8000000000000000000000 : _sels_sels_2_T_111; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_113 = _sels_sels_2_T_86 ? 100'h4000000000000000000000 : _sels_sels_2_T_112; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_114 = _sels_sels_2_T_85 ? 100'h2000000000000000000000 : _sels_sels_2_T_113; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_115 = _sels_sels_2_T_84 ? 100'h1000000000000000000000 : _sels_sels_2_T_114; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_116 = _sels_sels_2_T_83 ? 100'h800000000000000000000 : _sels_sels_2_T_115; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_117 = _sels_sels_2_T_82 ? 100'h400000000000000000000 : _sels_sels_2_T_116; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_118 = _sels_sels_2_T_81 ? 100'h200000000000000000000 : _sels_sels_2_T_117; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_119 = _sels_sels_2_T_80 ? 100'h100000000000000000000 : _sels_sels_2_T_118; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_120 = _sels_sels_2_T_79 ? 100'h80000000000000000000 : _sels_sels_2_T_119; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_121 = _sels_sels_2_T_78 ? 100'h40000000000000000000 : _sels_sels_2_T_120; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_122 = _sels_sels_2_T_77 ? 100'h20000000000000000000 : _sels_sels_2_T_121; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_123 = _sels_sels_2_T_76 ? 100'h10000000000000000000 : _sels_sels_2_T_122; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_124 = _sels_sels_2_T_75 ? 100'h8000000000000000000 : _sels_sels_2_T_123; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_125 = _sels_sels_2_T_74 ? 100'h4000000000000000000 : _sels_sels_2_T_124; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_126 = _sels_sels_2_T_73 ? 100'h2000000000000000000 : _sels_sels_2_T_125; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_127 = _sels_sels_2_T_72 ? 100'h1000000000000000000 : _sels_sels_2_T_126; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_128 = _sels_sels_2_T_71 ? 100'h800000000000000000 : _sels_sels_2_T_127; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_129 = _sels_sels_2_T_70 ? 
100'h400000000000000000 : _sels_sels_2_T_128; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_130 = _sels_sels_2_T_69 ? 100'h200000000000000000 : _sels_sels_2_T_129; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_131 = _sels_sels_2_T_68 ? 100'h100000000000000000 : _sels_sels_2_T_130; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_132 = _sels_sels_2_T_67 ? 100'h80000000000000000 : _sels_sels_2_T_131; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_133 = _sels_sels_2_T_66 ? 100'h40000000000000000 : _sels_sels_2_T_132; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_134 = _sels_sels_2_T_65 ? 100'h20000000000000000 : _sels_sels_2_T_133; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_135 = _sels_sels_2_T_64 ? 100'h10000000000000000 : _sels_sels_2_T_134; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_136 = _sels_sels_2_T_63 ? 100'h8000000000000000 : _sels_sels_2_T_135; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_137 = _sels_sels_2_T_62 ? 100'h4000000000000000 : _sels_sels_2_T_136; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_138 = _sels_sels_2_T_61 ? 100'h2000000000000000 : _sels_sels_2_T_137; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_139 = _sels_sels_2_T_60 ? 100'h1000000000000000 : _sels_sels_2_T_138; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_140 = _sels_sels_2_T_59 ? 100'h800000000000000 : _sels_sels_2_T_139; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_141 = _sels_sels_2_T_58 ? 100'h400000000000000 : _sels_sels_2_T_140; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_142 = _sels_sels_2_T_57 ? 100'h200000000000000 : _sels_sels_2_T_141; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_143 = _sels_sels_2_T_56 ? 100'h100000000000000 : _sels_sels_2_T_142; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_144 = _sels_sels_2_T_55 ? 100'h80000000000000 : _sels_sels_2_T_143; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_145 = _sels_sels_2_T_54 ? 100'h40000000000000 : _sels_sels_2_T_144; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_146 = _sels_sels_2_T_53 ? 100'h20000000000000 : _sels_sels_2_T_145; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_147 = _sels_sels_2_T_52 ? 100'h10000000000000 : _sels_sels_2_T_146; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_148 = _sels_sels_2_T_51 ? 100'h8000000000000 : _sels_sels_2_T_147; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_149 = _sels_sels_2_T_50 ? 100'h4000000000000 : _sels_sels_2_T_148; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_150 = _sels_sels_2_T_49 ? 100'h2000000000000 : _sels_sels_2_T_149; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_151 = _sels_sels_2_T_48 ? 100'h1000000000000 : _sels_sels_2_T_150; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_152 = _sels_sels_2_T_47 ? 100'h800000000000 : _sels_sels_2_T_151; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_153 = _sels_sels_2_T_46 ? 100'h400000000000 : _sels_sels_2_T_152; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_154 = _sels_sels_2_T_45 ? 100'h200000000000 : _sels_sels_2_T_153; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_155 = _sels_sels_2_T_44 ? 100'h100000000000 : _sels_sels_2_T_154; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_156 = _sels_sels_2_T_43 ? 100'h80000000000 : _sels_sels_2_T_155; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_157 = _sels_sels_2_T_42 ? 100'h40000000000 : _sels_sels_2_T_156; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_158 = _sels_sels_2_T_41 ? 
100'h20000000000 : _sels_sels_2_T_157; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_159 = _sels_sels_2_T_40 ? 100'h10000000000 : _sels_sels_2_T_158; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_160 = _sels_sels_2_T_39 ? 100'h8000000000 : _sels_sels_2_T_159; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_161 = _sels_sels_2_T_38 ? 100'h4000000000 : _sels_sels_2_T_160; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_162 = _sels_sels_2_T_37 ? 100'h2000000000 : _sels_sels_2_T_161; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_163 = _sels_sels_2_T_36 ? 100'h1000000000 : _sels_sels_2_T_162; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_164 = _sels_sels_2_T_35 ? 100'h800000000 : _sels_sels_2_T_163; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_165 = _sels_sels_2_T_34 ? 100'h400000000 : _sels_sels_2_T_164; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_166 = _sels_sels_2_T_33 ? 100'h200000000 : _sels_sels_2_T_165; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_167 = _sels_sels_2_T_32 ? 100'h100000000 : _sels_sels_2_T_166; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_168 = _sels_sels_2_T_31 ? 100'h80000000 : _sels_sels_2_T_167; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_169 = _sels_sels_2_T_30 ? 100'h40000000 : _sels_sels_2_T_168; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_170 = _sels_sels_2_T_29 ? 100'h20000000 : _sels_sels_2_T_169; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_171 = _sels_sels_2_T_28 ? 100'h10000000 : _sels_sels_2_T_170; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_172 = _sels_sels_2_T_27 ? 100'h8000000 : _sels_sels_2_T_171; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_173 = _sels_sels_2_T_26 ? 100'h4000000 : _sels_sels_2_T_172; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_174 = _sels_sels_2_T_25 ? 100'h2000000 : _sels_sels_2_T_173; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_175 = _sels_sels_2_T_24 ? 100'h1000000 : _sels_sels_2_T_174; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_176 = _sels_sels_2_T_23 ? 100'h800000 : _sels_sels_2_T_175; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_177 = _sels_sels_2_T_22 ? 100'h400000 : _sels_sels_2_T_176; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_178 = _sels_sels_2_T_21 ? 100'h200000 : _sels_sels_2_T_177; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_179 = _sels_sels_2_T_20 ? 100'h100000 : _sels_sels_2_T_178; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_180 = _sels_sels_2_T_19 ? 100'h80000 : _sels_sels_2_T_179; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_181 = _sels_sels_2_T_18 ? 100'h40000 : _sels_sels_2_T_180; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_182 = _sels_sels_2_T_17 ? 100'h20000 : _sels_sels_2_T_181; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_183 = _sels_sels_2_T_16 ? 100'h10000 : _sels_sels_2_T_182; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_184 = _sels_sels_2_T_15 ? 100'h8000 : _sels_sels_2_T_183; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_185 = _sels_sels_2_T_14 ? 100'h4000 : _sels_sels_2_T_184; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_186 = _sels_sels_2_T_13 ? 100'h2000 : _sels_sels_2_T_185; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_187 = _sels_sels_2_T_12 ? 100'h1000 : _sels_sels_2_T_186; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_188 = _sels_sels_2_T_11 ? 100'h800 : _sels_sels_2_T_187; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_189 = _sels_sels_2_T_10 ? 
100'h400 : _sels_sels_2_T_188; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_190 = _sels_sels_2_T_9 ? 100'h200 : _sels_sels_2_T_189; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_191 = _sels_sels_2_T_8 ? 100'h100 : _sels_sels_2_T_190; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_192 = _sels_sels_2_T_7 ? 100'h80 : _sels_sels_2_T_191; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_193 = _sels_sels_2_T_6 ? 100'h40 : _sels_sels_2_T_192; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_194 = _sels_sels_2_T_5 ? 100'h20 : _sels_sels_2_T_193; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_195 = _sels_sels_2_T_4 ? 100'h10 : _sels_sels_2_T_194; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_196 = _sels_sels_2_T_3 ? 100'h8 : _sels_sels_2_T_195; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_197 = _sels_sels_2_T_2 ? 100'h4 : _sels_sels_2_T_196; // @[OneHot.scala:85:71] wire [99:0] _sels_sels_2_T_198 = _sels_sels_2_T_1 ? 100'h2 : _sels_sels_2_T_197; // @[OneHot.scala:85:71] assign _sels_sels_2_T_199 = _sels_sels_2_T ? 100'h1 : _sels_sels_2_T_198; // @[OneHot.scala:85:71] assign sels_2 = _sels_sels_2_T_199; // @[Mux.scala:50:70] wire [99:0] _sels_T_4 = ~sels_2; // @[util.scala:405:20, :410:21] wire [99:0] _sels_T_5 = _sels_T_3 & _sels_T_4; // @[util.scala:410:{19,21}] wire _sel_fire_0_T_2; // @[rename-freelist.scala:85:45] wire _sel_fire_1_T_2; // @[rename-freelist.scala:85:45] wire _sel_fire_2_T_2; // @[rename-freelist.scala:85:45] wire sel_fire_0; // @[rename-freelist.scala:55:23] wire sel_fire_1; // @[rename-freelist.scala:55:23] wire sel_fire_2; // @[rename-freelist.scala:55:23] wire [127:0] _GEN = 128'h1 << io_alloc_pregs_0_bits_0; // @[OneHot.scala:58:35] wire [127:0] allocs_0; // @[OneHot.scala:58:35] assign allocs_0 = _GEN; // @[OneHot.scala:58:35] wire [127:0] _io_debug_freelist_T; // @[OneHot.scala:58:35] assign _io_debug_freelist_T = _GEN; // @[OneHot.scala:58:35] wire [127:0] _GEN_0 = 128'h1 << io_alloc_pregs_1_bits_0; // @[OneHot.scala:58:35] wire [127:0] allocs_1; // @[OneHot.scala:58:35] assign allocs_1 = _GEN_0; // @[OneHot.scala:58:35] wire [127:0] _io_debug_freelist_T_3; // @[OneHot.scala:58:35] assign _io_debug_freelist_T_3 = _GEN_0; // @[OneHot.scala:58:35] wire [127:0] _GEN_1 = 128'h1 << io_alloc_pregs_2_bits_0; // @[OneHot.scala:58:35] wire [127:0] allocs_2; // @[OneHot.scala:58:35] assign allocs_2 = _GEN_1; // @[OneHot.scala:58:35] wire [127:0] _io_debug_freelist_T_6; // @[OneHot.scala:58:35] assign _io_debug_freelist_T_6 = _GEN_1; // @[OneHot.scala:58:35] wire [99:0] _alloc_masks_T = {100{io_reqs_2_0}}; // @[rename-freelist.scala:20:7, :59:94] wire [127:0] _alloc_masks_T_1 = {28'h0, allocs_2[99:0] & _alloc_masks_T}; // @[OneHot.scala:58:35] wire [127:0] alloc_masks_2 = _alloc_masks_T_1; // @[rename-freelist.scala:59:{84,88}] wire [99:0] _alloc_masks_T_2 = {100{io_reqs_1_0}}; // @[rename-freelist.scala:20:7, :59:94] wire [127:0] _alloc_masks_T_3 = {28'h0, allocs_1[99:0] & _alloc_masks_T_2}; // @[OneHot.scala:58:35] wire [127:0] alloc_masks_1 = alloc_masks_2 | _alloc_masks_T_3; // @[rename-freelist.scala:59:{84,88}] wire [99:0] _alloc_masks_T_4 = {100{io_reqs_0_0}}; // @[rename-freelist.scala:20:7, :59:94] wire [127:0] _alloc_masks_T_5 = {28'h0, allocs_0[99:0] & _alloc_masks_T_4}; // @[OneHot.scala:58:35] wire [127:0] alloc_masks_0 = alloc_masks_1 | _alloc_masks_T_5; // @[rename-freelist.scala:59:{84,88}] wire [99:0] _sel_mask_T = {100{sel_fire_0}}; // @[rename-freelist.scala:55:23, :62:66] wire [99:0] _sel_mask_T_1 = sels_0 & _sel_mask_T; 
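// sel_mask (built from _sel_mask_T_1/_3/_5 below) ORs together the one-hot selections sels_0..sels_2
// that actually fired this cycle (gated by sel_fire_*); these are the physical registers handed out
// and to be dropped from free_list. dealloc_mask, computed just after, is the mirror image:
// registers returned via io_dealloc_pregs_* plus any reclaimed on a branch mispredict (br_deallocs).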
// @[util.scala:405:20] wire [99:0] _sel_mask_T_2 = {100{sel_fire_1}}; // @[rename-freelist.scala:55:23, :62:66] wire [99:0] _sel_mask_T_3 = sels_1 & _sel_mask_T_2; // @[util.scala:405:20] wire [99:0] _sel_mask_T_4 = {100{sel_fire_2}}; // @[rename-freelist.scala:55:23, :62:66] wire [99:0] _sel_mask_T_5 = sels_2 & _sel_mask_T_4; // @[util.scala:405:20] wire [99:0] _sel_mask_T_6 = _sel_mask_T_1 | _sel_mask_T_3; // @[rename-freelist.scala:62:{60,82}] wire [99:0] sel_mask = _sel_mask_T_6 | _sel_mask_T_5; // @[rename-freelist.scala:62:{60,82}] wire [99:0] _br_deallocs_T = {100{io_brupdate_b2_mispredict_0}}; // @[rename-freelist.scala:20:7, :63:69] wire [15:0][99:0] _GEN_2 = {{br_alloc_lists_15}, {br_alloc_lists_14}, {br_alloc_lists_13}, {br_alloc_lists_12}, {br_alloc_lists_11}, {br_alloc_lists_10}, {br_alloc_lists_9}, {br_alloc_lists_8}, {br_alloc_lists_7}, {br_alloc_lists_6}, {br_alloc_lists_5}, {br_alloc_lists_4}, {br_alloc_lists_3}, {br_alloc_lists_2}, {br_alloc_lists_1}, {br_alloc_lists_0}}; // @[rename-freelist.scala:51:27, :63:63] wire [99:0] br_deallocs = _GEN_2[io_brupdate_b2_uop_br_tag_0] & _br_deallocs_T; // @[rename-freelist.scala:20:7, :63:{63,69}] wire [127:0] _dealloc_mask_T = 128'h1 << io_dealloc_pregs_0_bits_0; // @[OneHot.scala:58:35] wire [99:0] _dealloc_mask_T_1 = _dealloc_mask_T[99:0]; // @[OneHot.scala:58:35] wire [99:0] _dealloc_mask_T_2 = {100{io_dealloc_pregs_0_valid_0}}; // @[rename-freelist.scala:20:7, :64:85] wire [99:0] _dealloc_mask_T_3 = _dealloc_mask_T_1 & _dealloc_mask_T_2; // @[rename-freelist.scala:64:{64,79,85}] wire [127:0] _dealloc_mask_T_4 = 128'h1 << io_dealloc_pregs_1_bits_0; // @[OneHot.scala:58:35] wire [99:0] _dealloc_mask_T_5 = _dealloc_mask_T_4[99:0]; // @[OneHot.scala:58:35] wire [99:0] _dealloc_mask_T_6 = {100{io_dealloc_pregs_1_valid_0}}; // @[rename-freelist.scala:20:7, :64:85] wire [99:0] _dealloc_mask_T_7 = _dealloc_mask_T_5 & _dealloc_mask_T_6; // @[rename-freelist.scala:64:{64,79,85}] wire [127:0] _dealloc_mask_T_8 = 128'h1 << io_dealloc_pregs_2_bits_0; // @[OneHot.scala:58:35] wire [99:0] _dealloc_mask_T_9 = _dealloc_mask_T_8[99:0]; // @[OneHot.scala:58:35] wire [99:0] _dealloc_mask_T_10 = {100{io_dealloc_pregs_2_valid_0}}; // @[rename-freelist.scala:20:7, :64:85] wire [99:0] _dealloc_mask_T_11 = _dealloc_mask_T_9 & _dealloc_mask_T_10; // @[rename-freelist.scala:64:{64,79,85}] wire [99:0] _dealloc_mask_T_12 = _dealloc_mask_T_3 | _dealloc_mask_T_7; // @[rename-freelist.scala:64:{79,106}] wire [99:0] _dealloc_mask_T_13 = _dealloc_mask_T_12 | _dealloc_mask_T_11; // @[rename-freelist.scala:64:{79,106}] wire [99:0] dealloc_mask = _dealloc_mask_T_13 | br_deallocs; // @[rename-freelist.scala:63:63, :64:{106,110}] wire [1:0] br_slots_hi = {_br_slots_WIRE_2, _br_slots_WIRE_1}; // @[rename-freelist.scala:66:{25,64}] wire [2:0] br_slots = {br_slots_hi, _br_slots_WIRE_0}; // @[rename-freelist.scala:66:{25,64}] wire [15:0] _GEN_3 = 16'h1 << io_ren_br_tags_0_bits_0; // @[OneHot.scala:58:35] wire [15:0] _list_req_T; // @[OneHot.scala:58:35] assign _list_req_T = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_7; // @[OneHot.scala:58:35] assign _list_req_T_7 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_14; // @[OneHot.scala:58:35] assign _list_req_T_14 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_21; // @[OneHot.scala:58:35] assign _list_req_T_21 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_28; // @[OneHot.scala:58:35] assign _list_req_T_28 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_35; // 
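// The 16'h1 << io_ren_br_tags_*_bits decoders (_GEN_3.._GEN_5) are shared across all sixteen
// branch-allocation lists; for each list, list_req extracts the decoder bit for that list's branch
// tag, and new_list = |list_req flags a branch tag being allocated this cycle, whose
// br_alloc_lists entry is then re-seeded from the allocation masks of the younger uops in the group.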
@[OneHot.scala:58:35] assign _list_req_T_35 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_42; // @[OneHot.scala:58:35] assign _list_req_T_42 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_49; // @[OneHot.scala:58:35] assign _list_req_T_49 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_56; // @[OneHot.scala:58:35] assign _list_req_T_56 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_63; // @[OneHot.scala:58:35] assign _list_req_T_63 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_70; // @[OneHot.scala:58:35] assign _list_req_T_70 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_77; // @[OneHot.scala:58:35] assign _list_req_T_77 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_84; // @[OneHot.scala:58:35] assign _list_req_T_84 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_91; // @[OneHot.scala:58:35] assign _list_req_T_91 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_98; // @[OneHot.scala:58:35] assign _list_req_T_98 = _GEN_3; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_105; // @[OneHot.scala:58:35] assign _list_req_T_105 = _GEN_3; // @[OneHot.scala:58:35] wire _list_req_T_1 = _list_req_T[0]; // @[OneHot.scala:58:35] wire _list_req_WIRE_0 = _list_req_T_1; // @[rename-freelist.scala:69:{27,72}] wire [15:0] _GEN_4 = 16'h1 << io_ren_br_tags_1_bits_0; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_2; // @[OneHot.scala:58:35] assign _list_req_T_2 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_9; // @[OneHot.scala:58:35] assign _list_req_T_9 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_16; // @[OneHot.scala:58:35] assign _list_req_T_16 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_23; // @[OneHot.scala:58:35] assign _list_req_T_23 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_30; // @[OneHot.scala:58:35] assign _list_req_T_30 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_37; // @[OneHot.scala:58:35] assign _list_req_T_37 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_44; // @[OneHot.scala:58:35] assign _list_req_T_44 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_51; // @[OneHot.scala:58:35] assign _list_req_T_51 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_58; // @[OneHot.scala:58:35] assign _list_req_T_58 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_65; // @[OneHot.scala:58:35] assign _list_req_T_65 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_72; // @[OneHot.scala:58:35] assign _list_req_T_72 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_79; // @[OneHot.scala:58:35] assign _list_req_T_79 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_86; // @[OneHot.scala:58:35] assign _list_req_T_86 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_93; // @[OneHot.scala:58:35] assign _list_req_T_93 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_100; // @[OneHot.scala:58:35] assign _list_req_T_100 = _GEN_4; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_107; // @[OneHot.scala:58:35] assign _list_req_T_107 = _GEN_4; // @[OneHot.scala:58:35] wire _list_req_T_3 = _list_req_T_2[0]; // @[OneHot.scala:58:35] wire _list_req_WIRE_1 = _list_req_T_3; // @[rename-freelist.scala:69:{27,72}] wire [15:0] _GEN_5 = 16'h1 << io_ren_br_tags_2_bits_0; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_4; // @[OneHot.scala:58:35] assign _list_req_T_4 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_11; // @[OneHot.scala:58:35] assign 
_list_req_T_11 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_18; // @[OneHot.scala:58:35] assign _list_req_T_18 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_25; // @[OneHot.scala:58:35] assign _list_req_T_25 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_32; // @[OneHot.scala:58:35] assign _list_req_T_32 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_39; // @[OneHot.scala:58:35] assign _list_req_T_39 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_46; // @[OneHot.scala:58:35] assign _list_req_T_46 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_53; // @[OneHot.scala:58:35] assign _list_req_T_53 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_60; // @[OneHot.scala:58:35] assign _list_req_T_60 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_67; // @[OneHot.scala:58:35] assign _list_req_T_67 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_74; // @[OneHot.scala:58:35] assign _list_req_T_74 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_81; // @[OneHot.scala:58:35] assign _list_req_T_81 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_88; // @[OneHot.scala:58:35] assign _list_req_T_88 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_95; // @[OneHot.scala:58:35] assign _list_req_T_95 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_102; // @[OneHot.scala:58:35] assign _list_req_T_102 = _GEN_5; // @[OneHot.scala:58:35] wire [15:0] _list_req_T_109; // @[OneHot.scala:58:35] assign _list_req_T_109 = _GEN_5; // @[OneHot.scala:58:35] wire _list_req_T_5 = _list_req_T_4[0]; // @[OneHot.scala:58:35] wire _list_req_WIRE_2 = _list_req_T_5; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi = {_list_req_WIRE_2, _list_req_WIRE_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_6 = {list_req_hi, _list_req_WIRE_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req = _list_req_T_6 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list = |list_req; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_0_T = list_req[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_0_T_1 = list_req[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_0_T_2 = list_req[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_0_T_3 = _br_alloc_lists_0_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_0_T_4 = _br_alloc_lists_0_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_0_T_6 = _br_alloc_lists_0_T_3 | _br_alloc_lists_0_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_0_T_7 = _br_alloc_lists_0_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_0_WIRE = _br_alloc_lists_0_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_0_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_0_T_9 = br_alloc_lists_0 & _br_alloc_lists_0_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_0_T_10 = {28'h0, _br_alloc_lists_0_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_0_T_11 = new_list ? 
_br_alloc_lists_0_WIRE : _br_alloc_lists_0_T_10; // @[Mux.scala:30:73] wire _list_req_T_8 = _list_req_T_7[1]; // @[OneHot.scala:58:35] wire _list_req_WIRE_1_0 = _list_req_T_8; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_10 = _list_req_T_9[1]; // @[OneHot.scala:58:35] wire _list_req_WIRE_1_1 = _list_req_T_10; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_12 = _list_req_T_11[1]; // @[OneHot.scala:58:35] wire _list_req_WIRE_1_2 = _list_req_T_12; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_1 = {_list_req_WIRE_1_2, _list_req_WIRE_1_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_13 = {list_req_hi_1, _list_req_WIRE_1_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_1 = _list_req_T_13 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_1 = |list_req_1; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_1_T = list_req_1[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_1_T_1 = list_req_1[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_1_T_2 = list_req_1[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_1_T_3 = _br_alloc_lists_1_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_1_T_4 = _br_alloc_lists_1_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_1_T_6 = _br_alloc_lists_1_T_3 | _br_alloc_lists_1_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_1_T_7 = _br_alloc_lists_1_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_1_WIRE = _br_alloc_lists_1_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_1_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_1_T_9 = br_alloc_lists_1 & _br_alloc_lists_1_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_1_T_10 = {28'h0, _br_alloc_lists_1_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_1_T_11 = new_list_1 ? _br_alloc_lists_1_WIRE : _br_alloc_lists_1_T_10; // @[Mux.scala:30:73] wire _list_req_T_15 = _list_req_T_14[2]; // @[OneHot.scala:58:35] wire _list_req_WIRE_2_0 = _list_req_T_15; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_17 = _list_req_T_16[2]; // @[OneHot.scala:58:35] wire _list_req_WIRE_2_1 = _list_req_T_17; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_19 = _list_req_T_18[2]; // @[OneHot.scala:58:35] wire _list_req_WIRE_2_2 = _list_req_T_19; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_2 = {_list_req_WIRE_2_2, _list_req_WIRE_2_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_20 = {list_req_hi_2, _list_req_WIRE_2_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_2 = _list_req_T_20 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_2 = |list_req_2; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_2_T = list_req_2[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_2_T_1 = list_req_2[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_2_T_2 = list_req_2[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_2_T_3 = _br_alloc_lists_2_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_2_T_4 = _br_alloc_lists_2_T_1 ? 
alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_2_T_6 = _br_alloc_lists_2_T_3 | _br_alloc_lists_2_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_2_T_7 = _br_alloc_lists_2_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_2_WIRE = _br_alloc_lists_2_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_2_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_2_T_9 = br_alloc_lists_2 & _br_alloc_lists_2_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_2_T_10 = {28'h0, _br_alloc_lists_2_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_2_T_11 = new_list_2 ? _br_alloc_lists_2_WIRE : _br_alloc_lists_2_T_10; // @[Mux.scala:30:73] wire _list_req_T_22 = _list_req_T_21[3]; // @[OneHot.scala:58:35] wire _list_req_WIRE_3_0 = _list_req_T_22; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_24 = _list_req_T_23[3]; // @[OneHot.scala:58:35] wire _list_req_WIRE_3_1 = _list_req_T_24; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_26 = _list_req_T_25[3]; // @[OneHot.scala:58:35] wire _list_req_WIRE_3_2 = _list_req_T_26; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_3 = {_list_req_WIRE_3_2, _list_req_WIRE_3_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_27 = {list_req_hi_3, _list_req_WIRE_3_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_3 = _list_req_T_27 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_3 = |list_req_3; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_3_T = list_req_3[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_3_T_1 = list_req_3[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_3_T_2 = list_req_3[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_3_T_3 = _br_alloc_lists_3_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_3_T_4 = _br_alloc_lists_3_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_3_T_6 = _br_alloc_lists_3_T_3 | _br_alloc_lists_3_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_3_T_7 = _br_alloc_lists_3_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_3_WIRE = _br_alloc_lists_3_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_3_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_3_T_9 = br_alloc_lists_3 & _br_alloc_lists_3_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_3_T_10 = {28'h0, _br_alloc_lists_3_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_3_T_11 = new_list_3 ? 
_br_alloc_lists_3_WIRE : _br_alloc_lists_3_T_10; // @[Mux.scala:30:73] wire _list_req_T_29 = _list_req_T_28[4]; // @[OneHot.scala:58:35] wire _list_req_WIRE_4_0 = _list_req_T_29; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_31 = _list_req_T_30[4]; // @[OneHot.scala:58:35] wire _list_req_WIRE_4_1 = _list_req_T_31; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_33 = _list_req_T_32[4]; // @[OneHot.scala:58:35] wire _list_req_WIRE_4_2 = _list_req_T_33; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_4 = {_list_req_WIRE_4_2, _list_req_WIRE_4_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_34 = {list_req_hi_4, _list_req_WIRE_4_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_4 = _list_req_T_34 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_4 = |list_req_4; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_4_T = list_req_4[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_4_T_1 = list_req_4[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_4_T_2 = list_req_4[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_4_T_3 = _br_alloc_lists_4_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_4_T_4 = _br_alloc_lists_4_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_4_T_6 = _br_alloc_lists_4_T_3 | _br_alloc_lists_4_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_4_T_7 = _br_alloc_lists_4_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_4_WIRE = _br_alloc_lists_4_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_4_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_4_T_9 = br_alloc_lists_4 & _br_alloc_lists_4_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_4_T_10 = {28'h0, _br_alloc_lists_4_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_4_T_11 = new_list_4 ? _br_alloc_lists_4_WIRE : _br_alloc_lists_4_T_10; // @[Mux.scala:30:73] wire _list_req_T_36 = _list_req_T_35[5]; // @[OneHot.scala:58:35] wire _list_req_WIRE_5_0 = _list_req_T_36; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_38 = _list_req_T_37[5]; // @[OneHot.scala:58:35] wire _list_req_WIRE_5_1 = _list_req_T_38; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_40 = _list_req_T_39[5]; // @[OneHot.scala:58:35] wire _list_req_WIRE_5_2 = _list_req_T_40; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_5 = {_list_req_WIRE_5_2, _list_req_WIRE_5_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_41 = {list_req_hi_5, _list_req_WIRE_5_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_5 = _list_req_T_41 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_5 = |list_req_5; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_5_T = list_req_5[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_5_T_1 = list_req_5[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_5_T_2 = list_req_5[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_5_T_3 = _br_alloc_lists_5_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_5_T_4 = _br_alloc_lists_5_T_1 ? 
alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_5_T_6 = _br_alloc_lists_5_T_3 | _br_alloc_lists_5_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_5_T_7 = _br_alloc_lists_5_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_5_WIRE = _br_alloc_lists_5_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_5_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_5_T_9 = br_alloc_lists_5 & _br_alloc_lists_5_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_5_T_10 = {28'h0, _br_alloc_lists_5_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_5_T_11 = new_list_5 ? _br_alloc_lists_5_WIRE : _br_alloc_lists_5_T_10; // @[Mux.scala:30:73] wire _list_req_T_43 = _list_req_T_42[6]; // @[OneHot.scala:58:35] wire _list_req_WIRE_6_0 = _list_req_T_43; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_45 = _list_req_T_44[6]; // @[OneHot.scala:58:35] wire _list_req_WIRE_6_1 = _list_req_T_45; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_47 = _list_req_T_46[6]; // @[OneHot.scala:58:35] wire _list_req_WIRE_6_2 = _list_req_T_47; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_6 = {_list_req_WIRE_6_2, _list_req_WIRE_6_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_48 = {list_req_hi_6, _list_req_WIRE_6_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_6 = _list_req_T_48 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_6 = |list_req_6; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_6_T = list_req_6[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_6_T_1 = list_req_6[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_6_T_2 = list_req_6[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_6_T_3 = _br_alloc_lists_6_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_6_T_4 = _br_alloc_lists_6_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_6_T_6 = _br_alloc_lists_6_T_3 | _br_alloc_lists_6_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_6_T_7 = _br_alloc_lists_6_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_6_WIRE = _br_alloc_lists_6_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_6_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_6_T_9 = br_alloc_lists_6 & _br_alloc_lists_6_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_6_T_10 = {28'h0, _br_alloc_lists_6_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_6_T_11 = new_list_6 ? 
_br_alloc_lists_6_WIRE : _br_alloc_lists_6_T_10; // @[Mux.scala:30:73] wire _list_req_T_50 = _list_req_T_49[7]; // @[OneHot.scala:58:35] wire _list_req_WIRE_7_0 = _list_req_T_50; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_52 = _list_req_T_51[7]; // @[OneHot.scala:58:35] wire _list_req_WIRE_7_1 = _list_req_T_52; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_54 = _list_req_T_53[7]; // @[OneHot.scala:58:35] wire _list_req_WIRE_7_2 = _list_req_T_54; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_7 = {_list_req_WIRE_7_2, _list_req_WIRE_7_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_55 = {list_req_hi_7, _list_req_WIRE_7_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_7 = _list_req_T_55 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_7 = |list_req_7; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_7_T = list_req_7[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_7_T_1 = list_req_7[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_7_T_2 = list_req_7[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_7_T_3 = _br_alloc_lists_7_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_7_T_4 = _br_alloc_lists_7_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_7_T_6 = _br_alloc_lists_7_T_3 | _br_alloc_lists_7_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_7_T_7 = _br_alloc_lists_7_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_7_WIRE = _br_alloc_lists_7_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_7_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_7_T_9 = br_alloc_lists_7 & _br_alloc_lists_7_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_7_T_10 = {28'h0, _br_alloc_lists_7_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_7_T_11 = new_list_7 ? _br_alloc_lists_7_WIRE : _br_alloc_lists_7_T_10; // @[Mux.scala:30:73] wire _list_req_T_57 = _list_req_T_56[8]; // @[OneHot.scala:58:35] wire _list_req_WIRE_8_0 = _list_req_T_57; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_59 = _list_req_T_58[8]; // @[OneHot.scala:58:35] wire _list_req_WIRE_8_1 = _list_req_T_59; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_61 = _list_req_T_60[8]; // @[OneHot.scala:58:35] wire _list_req_WIRE_8_2 = _list_req_T_61; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_8 = {_list_req_WIRE_8_2, _list_req_WIRE_8_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_62 = {list_req_hi_8, _list_req_WIRE_8_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_8 = _list_req_T_62 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_8 = |list_req_8; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_8_T = list_req_8[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_8_T_1 = list_req_8[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_8_T_2 = list_req_8[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_8_T_3 = _br_alloc_lists_8_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_8_T_4 = _br_alloc_lists_8_T_1 ? 
alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_8_T_6 = _br_alloc_lists_8_T_3 | _br_alloc_lists_8_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_8_T_7 = _br_alloc_lists_8_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_8_WIRE = _br_alloc_lists_8_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_8_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_8_T_9 = br_alloc_lists_8 & _br_alloc_lists_8_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_8_T_10 = {28'h0, _br_alloc_lists_8_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_8_T_11 = new_list_8 ? _br_alloc_lists_8_WIRE : _br_alloc_lists_8_T_10; // @[Mux.scala:30:73] wire _list_req_T_64 = _list_req_T_63[9]; // @[OneHot.scala:58:35] wire _list_req_WIRE_9_0 = _list_req_T_64; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_66 = _list_req_T_65[9]; // @[OneHot.scala:58:35] wire _list_req_WIRE_9_1 = _list_req_T_66; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_68 = _list_req_T_67[9]; // @[OneHot.scala:58:35] wire _list_req_WIRE_9_2 = _list_req_T_68; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_9 = {_list_req_WIRE_9_2, _list_req_WIRE_9_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_69 = {list_req_hi_9, _list_req_WIRE_9_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_9 = _list_req_T_69 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_9 = |list_req_9; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_9_T = list_req_9[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_9_T_1 = list_req_9[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_9_T_2 = list_req_9[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_9_T_3 = _br_alloc_lists_9_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_9_T_4 = _br_alloc_lists_9_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_9_T_6 = _br_alloc_lists_9_T_3 | _br_alloc_lists_9_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_9_T_7 = _br_alloc_lists_9_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_9_WIRE = _br_alloc_lists_9_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_9_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_9_T_9 = br_alloc_lists_9 & _br_alloc_lists_9_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_9_T_10 = {28'h0, _br_alloc_lists_9_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_9_T_11 = new_list_9 ? 
_br_alloc_lists_9_WIRE : _br_alloc_lists_9_T_10; // @[Mux.scala:30:73] wire _list_req_T_71 = _list_req_T_70[10]; // @[OneHot.scala:58:35] wire _list_req_WIRE_10_0 = _list_req_T_71; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_73 = _list_req_T_72[10]; // @[OneHot.scala:58:35] wire _list_req_WIRE_10_1 = _list_req_T_73; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_75 = _list_req_T_74[10]; // @[OneHot.scala:58:35] wire _list_req_WIRE_10_2 = _list_req_T_75; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_10 = {_list_req_WIRE_10_2, _list_req_WIRE_10_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_76 = {list_req_hi_10, _list_req_WIRE_10_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_10 = _list_req_T_76 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_10 = |list_req_10; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_10_T = list_req_10[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_10_T_1 = list_req_10[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_10_T_2 = list_req_10[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_10_T_3 = _br_alloc_lists_10_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_10_T_4 = _br_alloc_lists_10_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_10_T_6 = _br_alloc_lists_10_T_3 | _br_alloc_lists_10_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_10_T_7 = _br_alloc_lists_10_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_10_WIRE = _br_alloc_lists_10_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_10_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_10_T_9 = br_alloc_lists_10 & _br_alloc_lists_10_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_10_T_10 = {28'h0, _br_alloc_lists_10_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_10_T_11 = new_list_10 ? _br_alloc_lists_10_WIRE : _br_alloc_lists_10_T_10; // @[Mux.scala:30:73] wire _list_req_T_78 = _list_req_T_77[11]; // @[OneHot.scala:58:35] wire _list_req_WIRE_11_0 = _list_req_T_78; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_80 = _list_req_T_79[11]; // @[OneHot.scala:58:35] wire _list_req_WIRE_11_1 = _list_req_T_80; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_82 = _list_req_T_81[11]; // @[OneHot.scala:58:35] wire _list_req_WIRE_11_2 = _list_req_T_82; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_11 = {_list_req_WIRE_11_2, _list_req_WIRE_11_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_83 = {list_req_hi_11, _list_req_WIRE_11_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_11 = _list_req_T_83 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_11 = |list_req_11; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_11_T = list_req_11[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_11_T_1 = list_req_11[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_11_T_2 = list_req_11[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_11_T_3 = _br_alloc_lists_11_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_11_T_4 = _br_alloc_lists_11_T_1 ? 
alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_11_T_6 = _br_alloc_lists_11_T_3 | _br_alloc_lists_11_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_11_T_7 = _br_alloc_lists_11_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_11_WIRE = _br_alloc_lists_11_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_11_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_11_T_9 = br_alloc_lists_11 & _br_alloc_lists_11_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_11_T_10 = {28'h0, _br_alloc_lists_11_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_11_T_11 = new_list_11 ? _br_alloc_lists_11_WIRE : _br_alloc_lists_11_T_10; // @[Mux.scala:30:73] wire _list_req_T_85 = _list_req_T_84[12]; // @[OneHot.scala:58:35] wire _list_req_WIRE_12_0 = _list_req_T_85; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_87 = _list_req_T_86[12]; // @[OneHot.scala:58:35] wire _list_req_WIRE_12_1 = _list_req_T_87; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_89 = _list_req_T_88[12]; // @[OneHot.scala:58:35] wire _list_req_WIRE_12_2 = _list_req_T_89; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_12 = {_list_req_WIRE_12_2, _list_req_WIRE_12_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_90 = {list_req_hi_12, _list_req_WIRE_12_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_12 = _list_req_T_90 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_12 = |list_req_12; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_12_T = list_req_12[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_12_T_1 = list_req_12[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_12_T_2 = list_req_12[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_12_T_3 = _br_alloc_lists_12_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_12_T_4 = _br_alloc_lists_12_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_12_T_6 = _br_alloc_lists_12_T_3 | _br_alloc_lists_12_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_12_T_7 = _br_alloc_lists_12_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_12_WIRE = _br_alloc_lists_12_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_12_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_12_T_9 = br_alloc_lists_12 & _br_alloc_lists_12_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_12_T_10 = {28'h0, _br_alloc_lists_12_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_12_T_11 = new_list_12 ? 
_br_alloc_lists_12_WIRE : _br_alloc_lists_12_T_10; // @[Mux.scala:30:73] wire _list_req_T_92 = _list_req_T_91[13]; // @[OneHot.scala:58:35] wire _list_req_WIRE_13_0 = _list_req_T_92; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_94 = _list_req_T_93[13]; // @[OneHot.scala:58:35] wire _list_req_WIRE_13_1 = _list_req_T_94; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_96 = _list_req_T_95[13]; // @[OneHot.scala:58:35] wire _list_req_WIRE_13_2 = _list_req_T_96; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_13 = {_list_req_WIRE_13_2, _list_req_WIRE_13_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_97 = {list_req_hi_13, _list_req_WIRE_13_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_13 = _list_req_T_97 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_13 = |list_req_13; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_13_T = list_req_13[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_13_T_1 = list_req_13[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_13_T_2 = list_req_13[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_13_T_3 = _br_alloc_lists_13_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_13_T_4 = _br_alloc_lists_13_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_13_T_6 = _br_alloc_lists_13_T_3 | _br_alloc_lists_13_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_13_T_7 = _br_alloc_lists_13_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_13_WIRE = _br_alloc_lists_13_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_13_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_13_T_9 = br_alloc_lists_13 & _br_alloc_lists_13_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_13_T_10 = {28'h0, _br_alloc_lists_13_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_13_T_11 = new_list_13 ? _br_alloc_lists_13_WIRE : _br_alloc_lists_13_T_10; // @[Mux.scala:30:73] wire _list_req_T_99 = _list_req_T_98[14]; // @[OneHot.scala:58:35] wire _list_req_WIRE_14_0 = _list_req_T_99; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_101 = _list_req_T_100[14]; // @[OneHot.scala:58:35] wire _list_req_WIRE_14_1 = _list_req_T_101; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_103 = _list_req_T_102[14]; // @[OneHot.scala:58:35] wire _list_req_WIRE_14_2 = _list_req_T_103; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_14 = {_list_req_WIRE_14_2, _list_req_WIRE_14_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_104 = {list_req_hi_14, _list_req_WIRE_14_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_14 = _list_req_T_104 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_14 = |list_req_14; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_14_T = list_req_14[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_14_T_1 = list_req_14[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_14_T_2 = list_req_14[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_14_T_3 = _br_alloc_lists_14_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_14_T_4 = _br_alloc_lists_14_T_1 ? 
alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_14_T_6 = _br_alloc_lists_14_T_3 | _br_alloc_lists_14_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_14_T_7 = _br_alloc_lists_14_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_14_WIRE = _br_alloc_lists_14_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_14_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_14_T_9 = br_alloc_lists_14 & _br_alloc_lists_14_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_14_T_10 = {28'h0, _br_alloc_lists_14_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_14_T_11 = new_list_14 ? _br_alloc_lists_14_WIRE : _br_alloc_lists_14_T_10; // @[Mux.scala:30:73] wire _list_req_T_106 = _list_req_T_105[15]; // @[OneHot.scala:58:35] wire _list_req_WIRE_15_0 = _list_req_T_106; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_108 = _list_req_T_107[15]; // @[OneHot.scala:58:35] wire _list_req_WIRE_15_1 = _list_req_T_108; // @[rename-freelist.scala:69:{27,72}] wire _list_req_T_110 = _list_req_T_109[15]; // @[OneHot.scala:58:35] wire _list_req_WIRE_15_2 = _list_req_T_110; // @[rename-freelist.scala:69:{27,72}] wire [1:0] list_req_hi_15 = {_list_req_WIRE_15_2, _list_req_WIRE_15_1}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] _list_req_T_111 = {list_req_hi_15, _list_req_WIRE_15_0}; // @[rename-freelist.scala:69:{27,78}] wire [2:0] list_req_15 = _list_req_T_111 & br_slots; // @[rename-freelist.scala:66:64, :69:{78,85}] wire new_list_15 = |list_req_15; // @[rename-freelist.scala:69:85, :70:29] wire _br_alloc_lists_15_T = list_req_15[0]; // @[Mux.scala:32:36] wire _br_alloc_lists_15_T_1 = list_req_15[1]; // @[Mux.scala:32:36] wire _br_alloc_lists_15_T_2 = list_req_15[2]; // @[Mux.scala:32:36] wire [127:0] _br_alloc_lists_15_T_3 = _br_alloc_lists_15_T ? alloc_masks_1 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_15_T_4 = _br_alloc_lists_15_T_1 ? alloc_masks_2 : 128'h0; // @[Mux.scala:30:73, :32:36] wire [127:0] _br_alloc_lists_15_T_6 = _br_alloc_lists_15_T_3 | _br_alloc_lists_15_T_4; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_15_T_7 = _br_alloc_lists_15_T_6; // @[Mux.scala:30:73] wire [127:0] _br_alloc_lists_15_WIRE = _br_alloc_lists_15_T_7; // @[Mux.scala:30:73] wire [99:0] _br_alloc_lists_15_T_8 = ~br_deallocs; // @[rename-freelist.scala:63:63, :72:60] wire [99:0] _br_alloc_lists_15_T_9 = br_alloc_lists_15 & _br_alloc_lists_15_T_8; // @[rename-freelist.scala:51:27, :72:{58,60}] wire [127:0] _br_alloc_lists_15_T_10 = {28'h0, _br_alloc_lists_15_T_9} | alloc_masks_0; // @[rename-freelist.scala:59:84, :72:{58,73}] wire [127:0] _br_alloc_lists_15_T_11 = new_list_15 ? 
_br_alloc_lists_15_WIRE : _br_alloc_lists_15_T_10; // @[Mux.scala:30:73] wire [99:0] _free_list_T_1 = ~sel_mask; // @[rename-freelist.scala:62:82, :76:29] wire [99:0] _free_list_T_2 = free_list & _free_list_T_1; // @[rename-freelist.scala:50:26, :76:{27,29}] wire [99:0] _free_list_T_3 = _free_list_T_2 | dealloc_mask; // @[rename-freelist.scala:64:110, :76:{27,39}] wire [99:0] _free_list_T_5 = _free_list_T_3 & 100'hFFFFFFFFFFFFFFFFFFFFFFFFE; // @[rename-freelist.scala:76:{39,55}] wire can_sel = |sels_0; // @[util.scala:405:20] reg r_valid; // @[rename-freelist.scala:81:26] assign io_alloc_pregs_0_valid_0 = r_valid; // @[rename-freelist.scala:20:7, :81:26] wire [35:0] r_sel_hi = sels_0[99:64]; // @[OneHot.scala:30:18] wire [63:0] r_sel_lo = sels_0[63:0]; // @[OneHot.scala:31:18] wire _r_sel_T = |r_sel_hi; // @[OneHot.scala:30:18, :32:14] wire [63:0] _r_sel_T_1 = {28'h0, r_sel_hi} | r_sel_lo; // @[OneHot.scala:30:18, :31:18, :32:28] wire [31:0] r_sel_hi_1 = _r_sel_T_1[63:32]; // @[OneHot.scala:30:18, :32:28] wire [31:0] r_sel_lo_1 = _r_sel_T_1[31:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_2 = |r_sel_hi_1; // @[OneHot.scala:30:18, :32:14] wire [31:0] _r_sel_T_3 = r_sel_hi_1 | r_sel_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [15:0] r_sel_hi_2 = _r_sel_T_3[31:16]; // @[OneHot.scala:30:18, :32:28] wire [15:0] r_sel_lo_2 = _r_sel_T_3[15:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_4 = |r_sel_hi_2; // @[OneHot.scala:30:18, :32:14] wire [15:0] _r_sel_T_5 = r_sel_hi_2 | r_sel_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28] wire [7:0] r_sel_hi_3 = _r_sel_T_5[15:8]; // @[OneHot.scala:30:18, :32:28] wire [7:0] r_sel_lo_3 = _r_sel_T_5[7:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_6 = |r_sel_hi_3; // @[OneHot.scala:30:18, :32:14] wire [7:0] _r_sel_T_7 = r_sel_hi_3 | r_sel_lo_3; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] r_sel_hi_4 = _r_sel_T_7[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] r_sel_lo_4 = _r_sel_T_7[3:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_8 = |r_sel_hi_4; // @[OneHot.scala:30:18, :32:14] wire [3:0] _r_sel_T_9 = r_sel_hi_4 | r_sel_lo_4; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] r_sel_hi_5 = _r_sel_T_9[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] r_sel_lo_5 = _r_sel_T_9[1:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_10 = |r_sel_hi_5; // @[OneHot.scala:30:18, :32:14] wire [1:0] _r_sel_T_11 = r_sel_hi_5 | r_sel_lo_5; // @[OneHot.scala:30:18, :31:18, :32:28] wire _r_sel_T_12 = _r_sel_T_11[1]; // @[OneHot.scala:32:28] wire [1:0] _r_sel_T_13 = {_r_sel_T_10, _r_sel_T_12}; // @[OneHot.scala:32:{10,14}] wire [2:0] _r_sel_T_14 = {_r_sel_T_8, _r_sel_T_13}; // @[OneHot.scala:32:{10,14}] wire [3:0] _r_sel_T_15 = {_r_sel_T_6, _r_sel_T_14}; // @[OneHot.scala:32:{10,14}] wire [4:0] _r_sel_T_16 = {_r_sel_T_4, _r_sel_T_15}; // @[OneHot.scala:32:{10,14}] wire [5:0] _r_sel_T_17 = {_r_sel_T_2, _r_sel_T_16}; // @[OneHot.scala:32:{10,14}] wire [6:0] _r_sel_T_18 = {_r_sel_T, _r_sel_T_17}; // @[OneHot.scala:32:{10,14}] reg [6:0] r_sel; // @[rename-freelist.scala:82:28] assign io_alloc_pregs_0_bits_0 = r_sel; // @[rename-freelist.scala:20:7, :82:28] wire _r_valid_T = ~io_reqs_0_0; // @[rename-freelist.scala:20:7, :84:27] wire _r_valid_T_1 = r_valid & _r_valid_T; // @[rename-freelist.scala:81:26, :84:{24,27}] wire _r_valid_T_2 = _r_valid_T_1 | can_sel; // @[rename-freelist.scala:80:27, :84:{24,39}] wire _sel_fire_0_T = ~r_valid; // @[rename-freelist.scala:81:26, :85:21] wire _sel_fire_0_T_1 = _sel_fire_0_T | io_reqs_0_0; // 
@[rename-freelist.scala:20:7, :85:{21,30}] assign _sel_fire_0_T_2 = _sel_fire_0_T_1 & can_sel; // @[rename-freelist.scala:80:27, :85:{30,45}] assign sel_fire_0 = _sel_fire_0_T_2; // @[rename-freelist.scala:55:23, :85:45] wire can_sel_1 = |sels_1; // @[util.scala:405:20] reg r_valid_1; // @[rename-freelist.scala:81:26] assign io_alloc_pregs_1_valid_0 = r_valid_1; // @[rename-freelist.scala:20:7, :81:26] wire [35:0] r_sel_hi_6 = sels_1[99:64]; // @[OneHot.scala:30:18] wire [63:0] r_sel_lo_6 = sels_1[63:0]; // @[OneHot.scala:31:18] wire _r_sel_T_19 = |r_sel_hi_6; // @[OneHot.scala:30:18, :32:14] wire [63:0] _r_sel_T_20 = {28'h0, r_sel_hi_6} | r_sel_lo_6; // @[OneHot.scala:30:18, :31:18, :32:28] wire [31:0] r_sel_hi_7 = _r_sel_T_20[63:32]; // @[OneHot.scala:30:18, :32:28] wire [31:0] r_sel_lo_7 = _r_sel_T_20[31:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_21 = |r_sel_hi_7; // @[OneHot.scala:30:18, :32:14] wire [31:0] _r_sel_T_22 = r_sel_hi_7 | r_sel_lo_7; // @[OneHot.scala:30:18, :31:18, :32:28] wire [15:0] r_sel_hi_8 = _r_sel_T_22[31:16]; // @[OneHot.scala:30:18, :32:28] wire [15:0] r_sel_lo_8 = _r_sel_T_22[15:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_23 = |r_sel_hi_8; // @[OneHot.scala:30:18, :32:14] wire [15:0] _r_sel_T_24 = r_sel_hi_8 | r_sel_lo_8; // @[OneHot.scala:30:18, :31:18, :32:28] wire [7:0] r_sel_hi_9 = _r_sel_T_24[15:8]; // @[OneHot.scala:30:18, :32:28] wire [7:0] r_sel_lo_9 = _r_sel_T_24[7:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_25 = |r_sel_hi_9; // @[OneHot.scala:30:18, :32:14] wire [7:0] _r_sel_T_26 = r_sel_hi_9 | r_sel_lo_9; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] r_sel_hi_10 = _r_sel_T_26[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] r_sel_lo_10 = _r_sel_T_26[3:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_27 = |r_sel_hi_10; // @[OneHot.scala:30:18, :32:14] wire [3:0] _r_sel_T_28 = r_sel_hi_10 | r_sel_lo_10; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] r_sel_hi_11 = _r_sel_T_28[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] r_sel_lo_11 = _r_sel_T_28[1:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_29 = |r_sel_hi_11; // @[OneHot.scala:30:18, :32:14] wire [1:0] _r_sel_T_30 = r_sel_hi_11 | r_sel_lo_11; // @[OneHot.scala:30:18, :31:18, :32:28] wire _r_sel_T_31 = _r_sel_T_30[1]; // @[OneHot.scala:32:28] wire [1:0] _r_sel_T_32 = {_r_sel_T_29, _r_sel_T_31}; // @[OneHot.scala:32:{10,14}] wire [2:0] _r_sel_T_33 = {_r_sel_T_27, _r_sel_T_32}; // @[OneHot.scala:32:{10,14}] wire [3:0] _r_sel_T_34 = {_r_sel_T_25, _r_sel_T_33}; // @[OneHot.scala:32:{10,14}] wire [4:0] _r_sel_T_35 = {_r_sel_T_23, _r_sel_T_34}; // @[OneHot.scala:32:{10,14}] wire [5:0] _r_sel_T_36 = {_r_sel_T_21, _r_sel_T_35}; // @[OneHot.scala:32:{10,14}] wire [6:0] _r_sel_T_37 = {_r_sel_T_19, _r_sel_T_36}; // @[OneHot.scala:32:{10,14}] reg [6:0] r_sel_1; // @[rename-freelist.scala:82:28] assign io_alloc_pregs_1_bits_0 = r_sel_1; // @[rename-freelist.scala:20:7, :82:28] wire _r_valid_T_3 = ~io_reqs_1_0; // @[rename-freelist.scala:20:7, :84:27] wire _r_valid_T_4 = r_valid_1 & _r_valid_T_3; // @[rename-freelist.scala:81:26, :84:{24,27}] wire _r_valid_T_5 = _r_valid_T_4 | can_sel_1; // @[rename-freelist.scala:80:27, :84:{24,39}] wire _sel_fire_1_T = ~r_valid_1; // @[rename-freelist.scala:81:26, :85:21] wire _sel_fire_1_T_1 = _sel_fire_1_T | io_reqs_1_0; // @[rename-freelist.scala:20:7, :85:{21,30}] assign _sel_fire_1_T_2 = _sel_fire_1_T_1 & can_sel_1; // @[rename-freelist.scala:80:27, :85:{30,45}] assign sel_fire_1 = _sel_fire_1_T_2; // @[rename-freelist.scala:55:23, 
:85:45] wire can_sel_2 = |sels_2; // @[util.scala:405:20] reg r_valid_2; // @[rename-freelist.scala:81:26] assign io_alloc_pregs_2_valid_0 = r_valid_2; // @[rename-freelist.scala:20:7, :81:26] wire [35:0] r_sel_hi_12 = sels_2[99:64]; // @[OneHot.scala:30:18] wire [63:0] r_sel_lo_12 = sels_2[63:0]; // @[OneHot.scala:31:18] wire _r_sel_T_38 = |r_sel_hi_12; // @[OneHot.scala:30:18, :32:14] wire [63:0] _r_sel_T_39 = {28'h0, r_sel_hi_12} | r_sel_lo_12; // @[OneHot.scala:30:18, :31:18, :32:28] wire [31:0] r_sel_hi_13 = _r_sel_T_39[63:32]; // @[OneHot.scala:30:18, :32:28] wire [31:0] r_sel_lo_13 = _r_sel_T_39[31:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_40 = |r_sel_hi_13; // @[OneHot.scala:30:18, :32:14] wire [31:0] _r_sel_T_41 = r_sel_hi_13 | r_sel_lo_13; // @[OneHot.scala:30:18, :31:18, :32:28] wire [15:0] r_sel_hi_14 = _r_sel_T_41[31:16]; // @[OneHot.scala:30:18, :32:28] wire [15:0] r_sel_lo_14 = _r_sel_T_41[15:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_42 = |r_sel_hi_14; // @[OneHot.scala:30:18, :32:14] wire [15:0] _r_sel_T_43 = r_sel_hi_14 | r_sel_lo_14; // @[OneHot.scala:30:18, :31:18, :32:28] wire [7:0] r_sel_hi_15 = _r_sel_T_43[15:8]; // @[OneHot.scala:30:18, :32:28] wire [7:0] r_sel_lo_15 = _r_sel_T_43[7:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_44 = |r_sel_hi_15; // @[OneHot.scala:30:18, :32:14] wire [7:0] _r_sel_T_45 = r_sel_hi_15 | r_sel_lo_15; // @[OneHot.scala:30:18, :31:18, :32:28] wire [3:0] r_sel_hi_16 = _r_sel_T_45[7:4]; // @[OneHot.scala:30:18, :32:28] wire [3:0] r_sel_lo_16 = _r_sel_T_45[3:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_46 = |r_sel_hi_16; // @[OneHot.scala:30:18, :32:14] wire [3:0] _r_sel_T_47 = r_sel_hi_16 | r_sel_lo_16; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] r_sel_hi_17 = _r_sel_T_47[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] r_sel_lo_17 = _r_sel_T_47[1:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sel_T_48 = |r_sel_hi_17; // @[OneHot.scala:30:18, :32:14] wire [1:0] _r_sel_T_49 = r_sel_hi_17 | r_sel_lo_17; // @[OneHot.scala:30:18, :31:18, :32:28] wire _r_sel_T_50 = _r_sel_T_49[1]; // @[OneHot.scala:32:28] wire [1:0] _r_sel_T_51 = {_r_sel_T_48, _r_sel_T_50}; // @[OneHot.scala:32:{10,14}] wire [2:0] _r_sel_T_52 = {_r_sel_T_46, _r_sel_T_51}; // @[OneHot.scala:32:{10,14}] wire [3:0] _r_sel_T_53 = {_r_sel_T_44, _r_sel_T_52}; // @[OneHot.scala:32:{10,14}] wire [4:0] _r_sel_T_54 = {_r_sel_T_42, _r_sel_T_53}; // @[OneHot.scala:32:{10,14}] wire [5:0] _r_sel_T_55 = {_r_sel_T_40, _r_sel_T_54}; // @[OneHot.scala:32:{10,14}] wire [6:0] _r_sel_T_56 = {_r_sel_T_38, _r_sel_T_55}; // @[OneHot.scala:32:{10,14}] reg [6:0] r_sel_2; // @[rename-freelist.scala:82:28] assign io_alloc_pregs_2_bits_0 = r_sel_2; // @[rename-freelist.scala:20:7, :82:28] wire _r_valid_T_6 = ~io_reqs_2_0; // @[rename-freelist.scala:20:7, :84:27] wire _r_valid_T_7 = r_valid_2 & _r_valid_T_6; // @[rename-freelist.scala:81:26, :84:{24,27}] wire _r_valid_T_8 = _r_valid_T_7 | can_sel_2; // @[rename-freelist.scala:80:27, :84:{24,39}] wire _sel_fire_2_T = ~r_valid_2; // @[rename-freelist.scala:81:26, :85:21] wire _sel_fire_2_T_1 = _sel_fire_2_T | io_reqs_2_0; // @[rename-freelist.scala:20:7, :85:{21,30}] assign _sel_fire_2_T_2 = _sel_fire_2_T_1 & can_sel_2; // @[rename-freelist.scala:80:27, :85:{30,45}] assign sel_fire_2 = _sel_fire_2_T_2; // @[rename-freelist.scala:55:23, :85:45] wire [99:0] _io_debug_freelist_T_1 = {100{io_alloc_pregs_0_valid_0}}; // @[rename-freelist.scala:20:7, :91:83] wire [127:0] _io_debug_freelist_T_2 = {28'h0, _io_debug_freelist_T[99:0] & 
_io_debug_freelist_T_1}; // @[OneHot.scala:58:35] wire [99:0] _io_debug_freelist_T_4 = {100{io_alloc_pregs_1_valid_0}}; // @[rename-freelist.scala:20:7, :91:83] wire [127:0] _io_debug_freelist_T_5 = {28'h0, _io_debug_freelist_T_3[99:0] & _io_debug_freelist_T_4}; // @[OneHot.scala:58:35] wire [99:0] _io_debug_freelist_T_7 = {100{io_alloc_pregs_2_valid_0}}; // @[rename-freelist.scala:20:7, :91:83] wire [127:0] _io_debug_freelist_T_8 = {28'h0, _io_debug_freelist_T_6[99:0] & _io_debug_freelist_T_7}; // @[OneHot.scala:58:35] wire [127:0] _io_debug_freelist_T_9 = _io_debug_freelist_T_2 | _io_debug_freelist_T_5; // @[rename-freelist.scala:91:{77,104}] wire [127:0] _io_debug_freelist_T_10 = _io_debug_freelist_T_9 | _io_debug_freelist_T_8; // @[rename-freelist.scala:91:{77,104}] wire [127:0] _io_debug_freelist_T_11 = {28'h0, free_list} | _io_debug_freelist_T_10; // @[rename-freelist.scala:50:26, :91:{34,104}] assign io_debug_freelist_0 = _io_debug_freelist_T_11[99:0]; // @[rename-freelist.scala:20:7, :91:{21,34}]
endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message " + "is already pending (not received until current cycle) for a prior request message " + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the " + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the " + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be the same as a_size of the corresponding request " + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding " + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address === address, "'C' channel address changed within multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed within multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed within multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } // This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
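// Bookkeeping layout (explanatory note, not from the original source): inflight holds one
// valid bit per source ID, while inflight_opcodes / inflight_sizes pack one fixed-width slot
// per source ID, (1 << log_a_opcode_bus_size) and (1 << log_a_size_bus_size) bits wide
// respectively. Each slot stores the opcode/size shifted left by one with bit 0 used as a
// "set" marker, which is why the set path ORs in 1.U and a_opcode_lookup / a_size_lookup
// shift right by 1.U when reading it back. For illustration, assuming sizeBits = 3 (so
// a_size_bus_size = 4 and log_a_size_bus_size = 2), an accepted request with source = 2 and
// size = 5 writes (5 << 1 | 1) = 0xb into inflight_sizes bits [11:8], and the matching 'D'
// response reads back size 5 before d_sizes_clr zeroes that slot.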
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
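// Explanatory note (not in the original source): the dynamic rotate below is built as a
// logarithmic barrel shifter. The amount is zero-padded to log2Ceil(width) bits and stage i
// conditionally rotates by (1 << i) positions when amt(i) is set, so the cost is one mux
// layer per bit of the amount rather than a single width-by-width selector. For example,
// rotating an 8-bit value right by n = 5 (binary 101) applies the 1-bit and 4-bit stages
// and skips the 2-bit stage, giving a total rotate of 5.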
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
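// (Note added for clarity, not from the original source: these two questions drive a static
//  optimization. When only data-less D responses such as AccessAck, HintAck, Grant or
//  ReleaseAck are possible, hasData() for channel D is statically tied to false; when only
//  AccessAckData / GrantData are possible it is statically tied to true; otherwise the
//  result below is None and hasData() falls back to decoding d.opcode(0) at runtime.)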
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
module TLMonitor_62( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [3:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [3:0] source_1; // @[Monitor.scala:541:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [9:0] inflight; // @[Monitor.scala:614:27] reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [39:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [9:0] inflight_1; // @[Monitor.scala:726:35] reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
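Side note (illustration only, not part of the sources or the generated output above): the first/last tracking that the monitor's a_first/d_first counters rely on comes from TLEdge.firstlastHelper. Below is a minimal standalone Chisel sketch of the same down-counter idea; the module and port names (BeatTracker, fire, beats1) are hypothetical.

import chisel3._
import chisel3.util._

// Minimal sketch of the beat-tracking idea in TLEdge.firstlastHelper.
// `beats1` is the number of beats in the current message minus one; a
// down-counter marks the first and last beats and pulses `done` when the
// final beat is accepted.
class BeatTracker(maxBeats: Int) extends Module {
  require(maxBeats >= 2 && isPow2(maxBeats))
  val io = IO(new Bundle {
    val fire   = Input(Bool())                      // a beat is accepted this cycle
    val beats1 = Input(UInt(log2Ceil(maxBeats).W))  // beats per message, minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Ceil(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) {
    // Load the remaining-beat count on the first beat, otherwise count down.
    counter := Mux(io.first, io.beats1, counter1)
  }
}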
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
module AsyncValidSync_38( // @[AsyncQueue.scala:58:7] input io_in, // @[AsyncQueue.scala:59:14] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in_0 = io_in; // @[AsyncQueue.scala:58:7] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_51 io_out_source_extend ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_d (io_in_0), // @[AsyncQueue.scala:58:7] .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
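Side note (illustration only, not part of the sources or the generated output above): the widx/ridx pointers of the AsyncQueue are kept in Gray code by GrayCounter, so consecutive values differ in exactly one bit and can be synchronized across clock domains without sampling a multi-bit value mid-transition. The sketch below shows the conversion in both directions; the object and method names are hypothetical.

import chisel3._
import chisel3.util._

// Standalone sketch of the Gray-code conversions behind GrayCounter.
object GrayCode {
  // Binary to Gray, as in GrayCounter: g = b ^ (b >> 1)
  def binaryToGray(b: UInt): UInt = b ^ (b >> 1)

  // Gray to binary: bit i of the result is the XOR of all Gray bits at or above i.
  def grayToBinary(g: UInt): UInt = {
    val n = g.getWidth
    VecInit((0 until n).map(i => g(n - 1, i).xorR)).asUInt
  }
}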
Generate the Verilog code corresponding to the following Chisel files. File Tile.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ import Util._ /** * A Tile is a purely combinational 2D array of passThrough PEs. * a, b, s, and in_propag are broadcast across the entire array and are passed through to the Tile's outputs * @param width The data width of each PE in bits * @param rows Number of PEs on each row * @param columns Number of PEs on each column */ class Tile[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, tree_reduction: Boolean, max_simultaneous_matmuls: Int, val rows: Int, val columns: Int)(implicit ev: Arithmetic[T]) extends Module { val io = IO(new Bundle { val in_a = Input(Vec(rows, inputType)) val in_b = Input(Vec(columns, outputType)) // This is the output of the tile next to it val in_d = Input(Vec(columns, outputType)) val in_control = Input(Vec(columns, new PEControl(accType))) val in_id = Input(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val in_last = Input(Vec(columns, Bool())) val out_a = Output(Vec(rows, inputType)) val out_c = Output(Vec(columns, outputType)) val out_b = Output(Vec(columns, outputType)) val out_control = Output(Vec(columns, new PEControl(accType))) val out_id = Output(Vec(columns, UInt(log2Up(max_simultaneous_matmuls).W))) val out_last = Output(Vec(columns, Bool())) val in_valid = Input(Vec(columns, Bool())) val out_valid = Output(Vec(columns, Bool())) val bad_dataflow = Output(Bool()) }) import ev._ val tile = Seq.fill(rows, columns)(Module(new PE(inputType, outputType, accType, df, max_simultaneous_matmuls))) val tileT = tile.transpose // TODO: abstract hori/vert broadcast, all these connections look the same // Broadcast 'a' horizontally across the Tile for (r <- 0 until rows) { tile(r).foldLeft(io.in_a(r)) { case (in_a, pe) => pe.io.in_a := in_a pe.io.out_a } } // Broadcast 'b' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_b(c)) { case (in_b, pe) => pe.io.in_b := (if (tree_reduction) in_b.zero else in_b) pe.io.out_b } } // Broadcast 'd' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_d(c)) { case (in_d, pe) => pe.io.in_d := in_d pe.io.out_c } } // Broadcast 'control' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_control(c)) { case (in_ctrl, pe) => pe.io.in_control := in_ctrl pe.io.out_control } } // Broadcast 'garbage' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_valid(c)) { case (v, pe) => pe.io.in_valid := v pe.io.out_valid } } // Broadcast 'id' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_id(c)) { case (id, pe) => pe.io.in_id := id pe.io.out_id } } // Broadcast 'last' vertically across the Tile for (c <- 0 until columns) { tileT(c).foldLeft(io.in_last(c)) { case (last, pe) => pe.io.in_last := last pe.io.out_last } } // Drive the Tile's bottom IO for (c <- 0 until columns) { io.out_c(c) := tile(rows-1)(c).io.out_c io.out_control(c) := tile(rows-1)(c).io.out_control io.out_id(c) := tile(rows-1)(c).io.out_id io.out_last(c) := tile(rows-1)(c).io.out_last io.out_valid(c) := tile(rows-1)(c).io.out_valid io.out_b(c) := { if (tree_reduction) { val prods = tileT(c).map(_.io.out_b) accumulateTree(prods :+ io.in_b(c)) } else { tile(rows - 1)(c).io.out_b } } } io.bad_dataflow := tile.map(_.map(_.io.bad_dataflow).reduce(_||_)).reduce(_||_) // Drive the Tile's right IO for (r <- 0 until rows) { io.out_a(r) 
:= tile(r)(columns-1).io.out_a } }
module Tile_129( // @[Tile.scala:16:7] input clock, // @[Tile.scala:16:7] input reset, // @[Tile.scala:16:7] input [7:0] io_in_a_0, // @[Tile.scala:17:14] input [19:0] io_in_b_0, // @[Tile.scala:17:14] input [19:0] io_in_d_0, // @[Tile.scala:17:14] input io_in_control_0_dataflow, // @[Tile.scala:17:14] input io_in_control_0_propagate, // @[Tile.scala:17:14] input [4:0] io_in_control_0_shift, // @[Tile.scala:17:14] input [2:0] io_in_id_0, // @[Tile.scala:17:14] input io_in_last_0, // @[Tile.scala:17:14] output [7:0] io_out_a_0, // @[Tile.scala:17:14] output [19:0] io_out_c_0, // @[Tile.scala:17:14] output [19:0] io_out_b_0, // @[Tile.scala:17:14] output io_out_control_0_dataflow, // @[Tile.scala:17:14] output io_out_control_0_propagate, // @[Tile.scala:17:14] output [4:0] io_out_control_0_shift, // @[Tile.scala:17:14] output [2:0] io_out_id_0, // @[Tile.scala:17:14] output io_out_last_0, // @[Tile.scala:17:14] input io_in_valid_0, // @[Tile.scala:17:14] output io_out_valid_0 // @[Tile.scala:17:14] ); wire [7:0] io_in_a_0_0 = io_in_a_0; // @[Tile.scala:16:7] wire [19:0] io_in_b_0_0 = io_in_b_0; // @[Tile.scala:16:7] wire [19:0] io_in_d_0_0 = io_in_d_0; // @[Tile.scala:16:7] wire io_in_control_0_dataflow_0 = io_in_control_0_dataflow; // @[Tile.scala:16:7] wire io_in_control_0_propagate_0 = io_in_control_0_propagate; // @[Tile.scala:16:7] wire [4:0] io_in_control_0_shift_0 = io_in_control_0_shift; // @[Tile.scala:16:7] wire [2:0] io_in_id_0_0 = io_in_id_0; // @[Tile.scala:16:7] wire io_in_last_0_0 = io_in_last_0; // @[Tile.scala:16:7] wire io_in_valid_0_0 = io_in_valid_0; // @[Tile.scala:16:7] wire io_bad_dataflow = 1'h0; // @[Tile.scala:16:7, :17:14, :42:44] wire [7:0] io_out_a_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_c_0_0; // @[Tile.scala:16:7] wire [19:0] io_out_b_0_0; // @[Tile.scala:16:7] wire io_out_control_0_dataflow_0; // @[Tile.scala:16:7] wire io_out_control_0_propagate_0; // @[Tile.scala:16:7] wire [4:0] io_out_control_0_shift_0; // @[Tile.scala:16:7] wire [2:0] io_out_id_0_0; // @[Tile.scala:16:7] wire io_out_last_0_0; // @[Tile.scala:16:7] wire io_out_valid_0_0; // @[Tile.scala:16:7] PE_385 tile_0_0 ( // @[Tile.scala:42:44] .clock (clock), .reset (reset), .io_in_a (io_in_a_0_0), // @[Tile.scala:16:7] .io_in_b (io_in_b_0_0), // @[Tile.scala:16:7] .io_in_d (io_in_d_0_0), // @[Tile.scala:16:7] .io_out_a (io_out_a_0_0), .io_out_b (io_out_b_0_0), .io_out_c (io_out_c_0_0), .io_in_control_dataflow (io_in_control_0_dataflow_0), // @[Tile.scala:16:7] .io_in_control_propagate (io_in_control_0_propagate_0), // @[Tile.scala:16:7] .io_in_control_shift (io_in_control_0_shift_0), // @[Tile.scala:16:7] .io_out_control_dataflow (io_out_control_0_dataflow_0), .io_out_control_propagate (io_out_control_0_propagate_0), .io_out_control_shift (io_out_control_0_shift_0), .io_in_id (io_in_id_0_0), // @[Tile.scala:16:7] .io_out_id (io_out_id_0_0), .io_in_last (io_in_last_0_0), // @[Tile.scala:16:7] .io_out_last (io_out_last_0_0), .io_in_valid (io_in_valid_0_0), // @[Tile.scala:16:7] .io_out_valid (io_out_valid_0_0) ); // @[Tile.scala:42:44] assign io_out_a_0 = io_out_a_0_0; // @[Tile.scala:16:7] assign io_out_c_0 = io_out_c_0_0; // @[Tile.scala:16:7] assign io_out_b_0 = io_out_b_0_0; // @[Tile.scala:16:7] assign io_out_control_0_dataflow = io_out_control_0_dataflow_0; // @[Tile.scala:16:7] assign io_out_control_0_propagate = io_out_control_0_propagate_0; // @[Tile.scala:16:7] assign io_out_control_0_shift = io_out_control_0_shift_0; // @[Tile.scala:16:7] assign io_out_id_0 = io_out_id_0_0; // 
@[Tile.scala:16:7] assign io_out_last_0 = io_out_last_0_0; // @[Tile.scala:16:7] assign io_out_valid_0 = io_out_valid_0_0; // @[Tile.scala:16:7] endmodule
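Side note (illustration only, not part of the sources or the generated output above): the Tile wires its PEs with a foldLeft over each row or column, so each PE's input is driven by the previous PE's output and a value entering one edge passes combinationally through the whole chain. A minimal standalone sketch of that idiom follows; the module and port names (PassThroughStage, PassThroughChain) are hypothetical.

import chisel3._

// One stage that simply forwards its input, standing in for a PE's pass-through port.
class PassThroughStage(w: Int) extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt(w.W))
  })
  io.out := io.in
}

// Chain n stages with foldLeft, exactly like the broadcast loops in Tile.
class PassThroughChain(n: Int, w: Int) extends Module {
  require(n >= 1)
  val io = IO(new Bundle {
    val in  = Input(UInt(w.W))
    val out = Output(UInt(w.W))
  })
  val stages = Seq.fill(n)(Module(new PassThroughStage(w)))
  // Thread the signal through the chain; foldLeft returns the last stage's output.
  io.out := stages.foldLeft(io.in) { case (prev, stage) =>
    stage.io.in := prev
    stage.io.out
  }
}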
Generate the Verilog code corresponding to the following Chisel files. File RecFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import consts._ class RecFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val in = Input(Bits((inExpWidth + inSigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in); if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- io.out := io.in<<(outSigWidth - inSigWidth) io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W) } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( inExpWidth, inSigWidth, outExpWidth, outSigWidth, flRoundOpt_sigMSBitAlwaysZero )) roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn) roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := rawIn roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
module RecFNToRecFN_163( // @[RecFNToRecFN.scala:44:5] input [32:0] io_in, // @[RecFNToRecFN.scala:48:16] output [32:0] io_out // @[RecFNToRecFN.scala:48:16] ); wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5] wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16] wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16] wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35] wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54] wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5] wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5] wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35] wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23] wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}] wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23] assign _io_exceptionFlags_T_3 = 
{_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46] assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54] assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5] endmodule
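The exception-flags computation above can set only the invalid bit, and only for a signaling NaN input (the term annotated common.scala:82). A sketch of that helper, assuming the stock hardfloat definition of isSigNaNRawFloat:

package hardfloat

import chisel3._

object isSigNaNRawFloat {
  // A NaN whose quiet bit (sig(sigWidth - 2), just below the hidden bit) is clear is signaling.
  def apply(in: RawFloat): Bool = in.isNaN && !in.sig(in.sigWidth - 2)
}

With sigWidth = 24 the raw significand is 25 bits wide, so sig(sigWidth - 2) selects bit 22, which matches the rawIn_sig[22] term in the Verilog above.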
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
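// Implicit extension classes (UInt, Seq, Vec, Option, String, Data) and small
// combinational helpers (one-hot conversions, leftOR/rightOR, OptimizationBarrier,
// groupByIntoSeq) shared throughout rocket-chip.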
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Metadata.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
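// Client-side TileLink coherence metadata: ClientStates enumerates the
// Nothing/Branch/Trunk/Dirty permission levels, and ClientMetadata maps memory
// commands, cache-control commands, and Probe params onto Acquire/Release
// parameters and next states.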
package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.constants.MemoryOpConstants import freechips.rocketchip.util._ object ClientStates { val width = 2 def Nothing = 0.U(width.W) def Branch = 1.U(width.W) def Trunk = 2.U(width.W) def Dirty = 3.U(width.W) def hasReadPermission(state: UInt): Bool = state > Nothing def hasWritePermission(state: UInt): Bool = state > Branch } object MemoryOpCategories extends MemoryOpConstants { def wr = Cat(true.B, true.B) // Op actually writes def wi = Cat(false.B, true.B) // Future op will write def rd = Cat(false.B, false.B) // Op only reads def categorize(cmd: UInt): UInt = { val cat = Cat(isWrite(cmd), isWriteIntent(cmd)) //assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.") cat } } /** Stores the client-side coherence information, * such as permissions on the data and whether the data is dirty. * Its API can be used to make TileLink messages in response to * memory operations, cache control oeprations, or Probe messages. */ class ClientMetadata extends Bundle { /** Actual state information stored in this bundle */ val state = UInt(ClientStates.width.W) /** Metadata equality */ def ===(rhs: UInt): Bool = state === rhs def ===(rhs: ClientMetadata): Bool = state === rhs.state def =/=(rhs: ClientMetadata): Bool = !this.===(rhs) /** Is the block's data present in this cache */ def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing /** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */ private def growStarter(cmd: UInt): (Bool, UInt) = { import MemoryOpCategories._ import TLPermissions._ import ClientStates._ val c = categorize(cmd) MuxTLookup(Cat(c, state), (false.B, 0.U), Seq( //(effect, am now) -> (was a hit, next) Cat(rd, Dirty) -> (true.B, Dirty), Cat(rd, Trunk) -> (true.B, Trunk), Cat(rd, Branch) -> (true.B, Branch), Cat(wi, Dirty) -> (true.B, Dirty), Cat(wi, Trunk) -> (true.B, Trunk), Cat(wr, Dirty) -> (true.B, Dirty), Cat(wr, Trunk) -> (true.B, Dirty), //(effect, am now) -> (was a miss, param) Cat(rd, Nothing) -> (false.B, NtoB), Cat(wi, Branch) -> (false.B, BtoT), Cat(wi, Nothing) -> (false.B, NtoT), Cat(wr, Branch) -> (false.B, BtoT), Cat(wr, Nothing) -> (false.B, NtoT))) } /** Determine what state to go to after miss based on Grant param * For now, doesn't depend on state (which may have been Probed). */ private def growFinisher(cmd: UInt, param: UInt): UInt = { import MemoryOpCategories._ import TLPermissions._ import ClientStates._ val c = categorize(cmd) //assert(c === rd || param === toT, "Client was expecting trunk permissions.") MuxLookup(Cat(c, param), Nothing)(Seq( //(effect param) -> (next) Cat(rd, toB) -> Branch, Cat(rd, toT) -> Trunk, Cat(wi, toT) -> Trunk, Cat(wr, toT) -> Dirty)) } /** Does this cache have permissions on this block sufficient to perform op, * and what to do next (Acquire message param or updated metadata). 
*/ def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = { val r = growStarter(cmd) (r._1, r._2, ClientMetadata(r._2)) } /** Does a secondary miss on the block require another Acquire message */ def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = { import MemoryOpCategories._ val r1 = growStarter(first_cmd) val r2 = growStarter(second_cmd) val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd) val hit_again = r1._1 && r2._1 val dirties = categorize(second_cmd) === wr val biggest_grow_param = Mux(dirties, r2._2, r1._2) val dirtiest_state = ClientMetadata(biggest_grow_param) val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd) (needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd) } /** Metadata change on a returned Grant */ def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param)) /** Determine what state to go to based on Probe param */ private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = { import ClientStates._ import TLPermissions._ MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq( //(wanted, am now) -> (hasDirtyData resp, next) Cat(toT, Dirty) -> (true.B, TtoT, Trunk), Cat(toT, Trunk) -> (false.B, TtoT, Trunk), Cat(toT, Branch) -> (false.B, BtoB, Branch), Cat(toT, Nothing) -> (false.B, NtoN, Nothing), Cat(toB, Dirty) -> (true.B, TtoB, Branch), Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade Cat(toB, Branch) -> (false.B, BtoB, Branch), Cat(toB, Nothing) -> (false.B, NtoN, Nothing), Cat(toN, Dirty) -> (true.B, TtoN, Nothing), Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade Cat(toN, Nothing) -> (false.B, NtoN, Nothing))) } /** Translate cache control cmds into Probe param */ private def cmdToPermCap(cmd: UInt): UInt = { import MemoryOpCategories._ import TLPermissions._ MuxLookup(cmd, toN)(Seq( M_FLUSH -> toN, M_PRODUCE -> toB, M_CLEAN -> toT)) } def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = { val r = shrinkHelper(cmdToPermCap(cmd)) (r._1, r._2, ClientMetadata(r._3)) } def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = { val r = shrinkHelper(param) (r._1, r._2, ClientMetadata(r._3)) } } /** Factories for ClientMetadata, including on reset */ object ClientMetadata { def apply(perm: UInt) = { val meta = Wire(new ClientMetadata) meta.state := perm meta } def onReset = ClientMetadata(ClientStates.Nothing) def maximum = ClientMetadata(ClientStates.Dirty) } File dcache.scala: //****************************************************************************** // Ported from Rocket-Chip // See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details. 
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.lsu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.tile._ import freechips.rocketchip.util._ import freechips.rocketchip.rocket._ import boom.v3.common._ import boom.v3.exu.BrUpdateInfo import boom.v3.util.{IsKilledByBranch, GetNewBrMask, BranchKillableQueue, IsOlder, UpdateBrMask, AgePriorityEncoder, WrapInc, Transpose} class BoomWritebackUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req = Flipped(Decoupled(new WritebackReq(edge.bundle))) val meta_read = Decoupled(new L1MetaReadReq) val resp = Output(Bool()) val idx = Output(Valid(UInt())) val data_req = Decoupled(new L1DataReadReq) val data_resp = Input(UInt(encRowBits.W)) val mem_grant = Input(Bool()) val release = Decoupled(new TLBundleC(edge.bundle)) val lsu_release = Decoupled(new TLBundleC(edge.bundle)) }) val req = Reg(new WritebackReq(edge.bundle)) val s_invalid :: s_fill_buffer :: s_lsu_release :: s_active :: s_grant :: Nil = Enum(5) val state = RegInit(s_invalid) val r1_data_req_fired = RegInit(false.B) val r2_data_req_fired = RegInit(false.B) val r1_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W)) val r2_data_req_cnt = Reg(UInt(log2Up(refillCycles+1).W)) val data_req_cnt = RegInit(0.U(log2Up(refillCycles+1).W)) val (_, last_beat, all_beats_done, beat_count) = edge.count(io.release) val wb_buffer = Reg(Vec(refillCycles, UInt(encRowBits.W))) val acked = RegInit(false.B) io.idx.valid := state =/= s_invalid io.idx.bits := req.idx io.release.valid := false.B io.release.bits := DontCare io.req.ready := false.B io.meta_read.valid := false.B io.meta_read.bits := DontCare io.data_req.valid := false.B io.data_req.bits := DontCare io.resp := false.B io.lsu_release.valid := false.B io.lsu_release.bits := DontCare val r_address = Cat(req.tag, req.idx) << blockOffBits val id = cfg.nMSHRs val probeResponse = edge.ProbeAck( fromSource = id.U, toAddress = r_address, lgSize = lgCacheBlockBytes.U, reportPermissions = req.param, data = wb_buffer(data_req_cnt)) val voluntaryRelease = edge.Release( fromSource = id.U, toAddress = r_address, lgSize = lgCacheBlockBytes.U, shrinkPermissions = req.param, data = wb_buffer(data_req_cnt))._2 when (state === s_invalid) { io.req.ready := true.B when (io.req.fire) { state := s_fill_buffer data_req_cnt := 0.U req := io.req.bits acked := false.B } } .elsewhen (state === s_fill_buffer) { io.meta_read.valid := data_req_cnt < refillCycles.U io.meta_read.bits.idx := req.idx io.meta_read.bits.tag := req.tag io.data_req.valid := data_req_cnt < refillCycles.U io.data_req.bits.way_en := req.way_en io.data_req.bits.addr := (if(refillCycles > 1) Cat(req.idx, data_req_cnt(log2Up(refillCycles)-1,0)) else req.idx) << rowOffBits r1_data_req_fired := false.B r1_data_req_cnt := 0.U r2_data_req_fired := r1_data_req_fired r2_data_req_cnt := r1_data_req_cnt when (io.data_req.fire && io.meta_read.fire) { r1_data_req_fired := true.B r1_data_req_cnt := data_req_cnt data_req_cnt := data_req_cnt + 1.U } when (r2_data_req_fired) { wb_buffer(r2_data_req_cnt) := io.data_resp when (r2_data_req_cnt === (refillCycles-1).U) { io.resp := true.B state := s_lsu_release data_req_cnt := 0.U } } } .elsewhen (state === s_lsu_release) { io.lsu_release.valid := true.B 
io.lsu_release.bits := probeResponse when (io.lsu_release.fire) { state := s_active } } .elsewhen (state === s_active) { io.release.valid := data_req_cnt < refillCycles.U io.release.bits := Mux(req.voluntary, voluntaryRelease, probeResponse) when (io.mem_grant) { acked := true.B } when (io.release.fire) { data_req_cnt := data_req_cnt + 1.U } when ((data_req_cnt === (refillCycles-1).U) && io.release.fire) { state := Mux(req.voluntary, s_grant, s_invalid) } } .elsewhen (state === s_grant) { when (io.mem_grant) { acked := true.B } when (acked) { state := s_invalid } } } class BoomProbeUnit(implicit edge: TLEdgeOut, p: Parameters) extends L1HellaCacheModule()(p) { val io = IO(new Bundle { val req = Flipped(Decoupled(new TLBundleB(edge.bundle))) val rep = Decoupled(new TLBundleC(edge.bundle)) val meta_read = Decoupled(new L1MetaReadReq) val meta_write = Decoupled(new L1MetaWriteReq) val wb_req = Decoupled(new WritebackReq(edge.bundle)) val way_en = Input(UInt(nWays.W)) val wb_rdy = Input(Bool()) // Is writeback unit currently busy? If so need to retry meta read when its done val mshr_rdy = Input(Bool()) // Is MSHR ready for this request to proceed? val mshr_wb_rdy = Output(Bool()) // Should we block MSHR writebacks while we finish our own? val block_state = Input(new ClientMetadata()) val lsu_release = Decoupled(new TLBundleC(edge.bundle)) val state = Output(Valid(UInt(coreMaxAddrBits.W))) }) val (s_invalid :: s_meta_read :: s_meta_resp :: s_mshr_req :: s_mshr_resp :: s_lsu_release :: s_release :: s_writeback_req :: s_writeback_resp :: s_meta_write :: s_meta_write_resp :: Nil) = Enum(11) val state = RegInit(s_invalid) val req = Reg(new TLBundleB(edge.bundle)) val req_idx = req.address(idxMSB, idxLSB) val req_tag = req.address >> untagBits val way_en = Reg(UInt()) val tag_matches = way_en.orR val old_coh = Reg(new ClientMetadata) val miss_coh = ClientMetadata.onReset val reply_coh = Mux(tag_matches, old_coh, miss_coh) val (is_dirty, report_param, new_coh) = reply_coh.onProbe(req.param) io.state.valid := state =/= s_invalid io.state.bits := req.address io.req.ready := state === s_invalid io.rep.valid := state === s_release io.rep.bits := edge.ProbeAck(req, report_param) assert(!io.rep.valid || !edge.hasData(io.rep.bits), "ProbeUnit should not send ProbeAcks with data, WritebackUnit should handle it") io.meta_read.valid := state === s_meta_read io.meta_read.bits.idx := req_idx io.meta_read.bits.tag := req_tag io.meta_read.bits.way_en := ~(0.U(nWays.W)) io.meta_write.valid := state === s_meta_write io.meta_write.bits.way_en := way_en io.meta_write.bits.idx := req_idx io.meta_write.bits.tag := req_tag io.meta_write.bits.data.tag := req_tag io.meta_write.bits.data.coh := new_coh io.wb_req.valid := state === s_writeback_req io.wb_req.bits.source := req.source io.wb_req.bits.idx := req_idx io.wb_req.bits.tag := req_tag io.wb_req.bits.param := report_param io.wb_req.bits.way_en := way_en io.wb_req.bits.voluntary := false.B io.mshr_wb_rdy := !state.isOneOf(s_release, s_writeback_req, s_writeback_resp, s_meta_write, s_meta_write_resp) io.lsu_release.valid := state === s_lsu_release io.lsu_release.bits := edge.ProbeAck(req, report_param) // state === s_invalid when (state === s_invalid) { when (io.req.fire) { state := s_meta_read req := io.req.bits } } .elsewhen (state === s_meta_read) { when (io.meta_read.fire) { state := s_meta_resp } } .elsewhen (state === s_meta_resp) { // we need to wait one cycle for the metadata to be read from the array state := s_mshr_req } .elsewhen (state === s_mshr_req) { 
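// Capture the probed block's coherence state and hit way, as reported by the
// cache pipeline after the metadata read; advance only when both the MSHRs and
// the writeback unit can accept this probe.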
old_coh := io.block_state way_en := io.way_en // if the read didn't go through, we need to retry state := Mux(io.mshr_rdy && io.wb_rdy, s_mshr_resp, s_meta_read) } .elsewhen (state === s_mshr_resp) { state := Mux(tag_matches && is_dirty, s_writeback_req, s_lsu_release) } .elsewhen (state === s_lsu_release) { when (io.lsu_release.fire) { state := s_release } } .elsewhen (state === s_release) { when (io.rep.ready) { state := Mux(tag_matches, s_meta_write, s_invalid) } } .elsewhen (state === s_writeback_req) { when (io.wb_req.fire) { state := s_writeback_resp } } .elsewhen (state === s_writeback_resp) { // wait for the writeback request to finish before updating the metadata when (io.wb_req.ready) { state := s_meta_write } } .elsewhen (state === s_meta_write) { when (io.meta_write.fire) { state := s_meta_write_resp } } .elsewhen (state === s_meta_write_resp) { state := s_invalid } } class BoomL1MetaReadReq(implicit p: Parameters) extends BoomBundle()(p) { val req = Vec(memWidth, new L1MetaReadReq) } class BoomL1DataReadReq(implicit p: Parameters) extends BoomBundle()(p) { val req = Vec(memWidth, new L1DataReadReq) val valid = Vec(memWidth, Bool()) } abstract class AbstractBoomDataArray(implicit p: Parameters) extends BoomModule with HasL1HellaCacheParameters { val io = IO(new BoomBundle { val read = Input(Vec(memWidth, Valid(new L1DataReadReq))) val write = Input(Valid(new L1DataWriteReq)) val resp = Output(Vec(memWidth, Vec(nWays, Bits(encRowBits.W)))) val nacks = Output(Vec(memWidth, Bool())) }) def pipeMap[T <: Data](f: Int => T) = VecInit((0 until memWidth).map(f)) } class BoomDuplicatedDataArray(implicit p: Parameters) extends AbstractBoomDataArray { val waddr = io.write.bits.addr >> rowOffBits for (j <- 0 until memWidth) { val raddr = io.read(j).bits.addr >> rowOffBits for (w <- 0 until nWays) { val array = DescribedSRAM( name = s"array_${w}_${j}", desc = "Non-blocking DCache Data Array", size = nSets * refillCycles, data = Vec(rowWords, Bits(encDataBits.W)) ) when (io.write.bits.way_en(w) && io.write.valid) { val data = VecInit((0 until rowWords) map (i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i))) array.write(waddr, data, io.write.bits.wmask.asBools) } io.resp(j)(w) := RegNext(array.read(raddr, io.read(j).bits.way_en(w) && io.read(j).valid).asUInt) } io.nacks(j) := false.B } } class BoomBankedDataArray(implicit p: Parameters) extends AbstractBoomDataArray { val nBanks = boomParams.numDCacheBanks val bankSize = nSets * refillCycles / nBanks require (nBanks >= memWidth) require (bankSize > 0) val bankBits = log2Ceil(nBanks) val bankOffBits = log2Ceil(rowWords) + log2Ceil(wordBytes) val bidxBits = log2Ceil(bankSize) val bidxOffBits = bankOffBits + bankBits //---------------------------------------------------------------------------------------------------- val s0_rbanks = if (nBanks > 1) VecInit(io.read.map(r => (r.bits.addr >> bankOffBits)(bankBits-1,0))) else VecInit(0.U) val s0_wbank = if (nBanks > 1) (io.write.bits.addr >> bankOffBits)(bankBits-1,0) else 0.U val s0_ridxs = VecInit(io.read.map(r => (r.bits.addr >> bidxOffBits)(bidxBits-1,0))) val s0_widx = (io.write.bits.addr >> bidxOffBits)(bidxBits-1,0) val s0_read_valids = VecInit(io.read.map(_.valid)) val s0_bank_conflicts = pipeMap(w => (0 until w).foldLeft(false.B)((c,i) => c || io.read(i).valid && s0_rbanks(i) === s0_rbanks(w))) val s0_do_bank_read = s0_read_valids zip s0_bank_conflicts map {case (v,c) => v && !c} val s0_bank_read_gnts = Transpose(VecInit(s0_rbanks zip s0_do_bank_read map {case (b,d) => 
VecInit((UIntToOH(b) & Fill(nBanks,d)).asBools)})) val s0_bank_write_gnt = (UIntToOH(s0_wbank) & Fill(nBanks, io.write.valid)).asBools //---------------------------------------------------------------------------------------------------- val s1_rbanks = RegNext(s0_rbanks) val s1_ridxs = RegNext(s0_ridxs) val s1_read_valids = RegNext(s0_read_valids) val s1_pipe_selection = pipeMap(i => VecInit(PriorityEncoderOH(pipeMap(j => if (j < i) s1_read_valids(j) && s1_rbanks(j) === s1_rbanks(i) else if (j == i) true.B else false.B)))) val s1_ridx_match = pipeMap(i => pipeMap(j => if (j < i) s1_ridxs(j) === s1_ridxs(i) else if (j == i) true.B else false.B)) val s1_nacks = pipeMap(w => s1_read_valids(w) && (s1_pipe_selection(w).asUInt & ~s1_ridx_match(w).asUInt).orR) val s1_bank_selection = pipeMap(w => Mux1H(s1_pipe_selection(w), s1_rbanks)) //---------------------------------------------------------------------------------------------------- val s2_bank_selection = RegNext(s1_bank_selection) val s2_nacks = RegNext(s1_nacks) for (w <- 0 until nWays) { val s2_bank_reads = Reg(Vec(nBanks, Bits(encRowBits.W))) for (b <- 0 until nBanks) { val array = DescribedSRAM( name = s"array_${w}_${b}", desc = "Non-blocking DCache Data Array", size = bankSize, data = Vec(rowWords, Bits(encDataBits.W)) ) val ridx = Mux1H(s0_bank_read_gnts(b), s0_ridxs) val way_en = Mux1H(s0_bank_read_gnts(b), io.read.map(_.bits.way_en)) s2_bank_reads(b) := array.read(ridx, way_en(w) && s0_bank_read_gnts(b).reduce(_||_)).asUInt when (io.write.bits.way_en(w) && s0_bank_write_gnt(b)) { val data = VecInit((0 until rowWords) map (i => io.write.bits.data(encDataBits*(i+1)-1,encDataBits*i))) array.write(s0_widx, data, io.write.bits.wmask.asBools) } } for (i <- 0 until memWidth) { io.resp(i)(w) := s2_bank_reads(s2_bank_selection(i)) } } io.nacks := s2_nacks } /** * Top level class wrapping a non-blocking dcache. 
* * @param hartid hardware thread for the cache */ class BoomNonBlockingDCache(staticIdForMetadataUseOnly: Int)(implicit p: Parameters) extends LazyModule { private val tileParams = p(TileKey) protected val cfg = tileParams.dcache.get protected def cacheClientParameters = cfg.scratch.map(x => Seq()).getOrElse(Seq(TLMasterParameters.v1( name = s"Core ${staticIdForMetadataUseOnly} DCache", sourceId = IdRange(0, 1 max (cfg.nMSHRs + 1)), supportsProbe = TransferSizes(cfg.blockBytes, cfg.blockBytes)))) protected def mmioClientParameters = Seq(TLMasterParameters.v1( name = s"Core ${staticIdForMetadataUseOnly} DCache MMIO", sourceId = IdRange(cfg.nMSHRs + 1, cfg.nMSHRs + 1 + cfg.nMMIOs), requestFifo = true)) val node = TLClientNode(Seq(TLMasterPortParameters.v1( cacheClientParameters ++ mmioClientParameters, minLatency = 1))) lazy val module = new BoomNonBlockingDCacheModule(this) def flushOnFenceI = cfg.scratch.isEmpty && !node.edges.out(0).manager.managers.forall(m => !m.supportsAcquireT || !m.executable || m.regionType >= RegionType.TRACKED || m.regionType <= RegionType.IDEMPOTENT) require(!tileParams.core.haveCFlush || cfg.scratch.isEmpty, "CFLUSH_D_L1 instruction requires a D$") } class BoomDCacheBundle(implicit p: Parameters, edge: TLEdgeOut) extends BoomBundle()(p) { val lsu = Flipped(new LSUDMemIO) } class BoomNonBlockingDCacheModule(outer: BoomNonBlockingDCache) extends LazyModuleImp(outer) with HasL1HellaCacheParameters with HasBoomCoreParameters { implicit val edge = outer.node.edges.out(0) val (tl_out, _) = outer.node.out(0) val io = IO(new BoomDCacheBundle) private val fifoManagers = edge.manager.managers.filter(TLFIFOFixer.allVolatile) fifoManagers.foreach { m => require (m.fifoId == fifoManagers.head.fifoId, s"IOMSHRs must be FIFO for all regions with effects, but HellaCache sees ${m.nodePath.map(_.name)}") } def widthMap[T <: Data](f: Int => T) = VecInit((0 until memWidth).map(f)) val t_replay :: t_probe :: t_wb :: t_mshr_meta_read :: t_lsu :: t_prefetch :: Nil = Enum(6) val wb = Module(new BoomWritebackUnit) val prober = Module(new BoomProbeUnit) val mshrs = Module(new BoomMSHRFile) mshrs.io.clear_all := io.lsu.force_order mshrs.io.brupdate := io.lsu.brupdate mshrs.io.exception := io.lsu.exception mshrs.io.rob_pnr_idx := io.lsu.rob_pnr_idx mshrs.io.rob_head_idx := io.lsu.rob_head_idx // tags def onReset = L1Metadata(0.U, ClientMetadata.onReset) val meta = Seq.fill(memWidth) { Module(new L1MetadataArray(onReset _)) } val metaWriteArb = Module(new Arbiter(new L1MetaWriteReq, 2)) // 0 goes to MSHR refills, 1 goes to prober val metaReadArb = Module(new Arbiter(new BoomL1MetaReadReq, 6)) // 0 goes to MSHR replays, 1 goes to prober, 2 goes to wb, 3 goes to MSHR meta read, // 4 goes to pipeline, 5 goes to prefetcher metaReadArb.io.in := DontCare for (w <- 0 until memWidth) { meta(w).io.write.valid := metaWriteArb.io.out.fire meta(w).io.write.bits := metaWriteArb.io.out.bits meta(w).io.read.valid := metaReadArb.io.out.valid meta(w).io.read.bits := metaReadArb.io.out.bits.req(w) } metaReadArb.io.out.ready := meta.map(_.io.read.ready).reduce(_||_) metaWriteArb.io.out.ready := meta.map(_.io.write.ready).reduce(_||_) // data val data = Module(if (boomParams.numDCacheBanks == 1) new BoomDuplicatedDataArray else new BoomBankedDataArray) val dataWriteArb = Module(new Arbiter(new L1DataWriteReq, 2)) // 0 goes to pipeline, 1 goes to MSHR refills val dataReadArb = Module(new Arbiter(new BoomL1DataReadReq, 3)) // 0 goes to MSHR replays, 1 goes to wb, 2 goes to pipeline dataReadArb.io.in := DontCare 
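// Drive the data array's read and write ports from the arbitrated requests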
for (w <- 0 until memWidth) { data.io.read(w).valid := dataReadArb.io.out.bits.valid(w) && dataReadArb.io.out.valid data.io.read(w).bits := dataReadArb.io.out.bits.req(w) } dataReadArb.io.out.ready := true.B data.io.write.valid := dataWriteArb.io.out.fire data.io.write.bits := dataWriteArb.io.out.bits dataWriteArb.io.out.ready := true.B // ------------ // New requests io.lsu.req.ready := metaReadArb.io.in(4).ready && dataReadArb.io.in(2).ready metaReadArb.io.in(4).valid := io.lsu.req.valid dataReadArb.io.in(2).valid := io.lsu.req.valid for (w <- 0 until memWidth) { // Tag read for new requests metaReadArb.io.in(4).bits.req(w).idx := io.lsu.req.bits(w).bits.addr >> blockOffBits metaReadArb.io.in(4).bits.req(w).way_en := DontCare metaReadArb.io.in(4).bits.req(w).tag := DontCare // Data read for new requests dataReadArb.io.in(2).bits.valid(w) := io.lsu.req.bits(w).valid dataReadArb.io.in(2).bits.req(w).addr := io.lsu.req.bits(w).bits.addr dataReadArb.io.in(2).bits.req(w).way_en := ~0.U(nWays.W) } // ------------ // MSHR Replays val replay_req = Wire(Vec(memWidth, new BoomDCacheReq)) replay_req := DontCare replay_req(0).uop := mshrs.io.replay.bits.uop replay_req(0).addr := mshrs.io.replay.bits.addr replay_req(0).data := mshrs.io.replay.bits.data replay_req(0).is_hella := mshrs.io.replay.bits.is_hella mshrs.io.replay.ready := metaReadArb.io.in(0).ready && dataReadArb.io.in(0).ready // Tag read for MSHR replays // We don't actually need to read the metadata, for replays we already know our way metaReadArb.io.in(0).valid := mshrs.io.replay.valid metaReadArb.io.in(0).bits.req(0).idx := mshrs.io.replay.bits.addr >> blockOffBits metaReadArb.io.in(0).bits.req(0).way_en := DontCare metaReadArb.io.in(0).bits.req(0).tag := DontCare // Data read for MSHR replays dataReadArb.io.in(0).valid := mshrs.io.replay.valid dataReadArb.io.in(0).bits.req(0).addr := mshrs.io.replay.bits.addr dataReadArb.io.in(0).bits.req(0).way_en := mshrs.io.replay.bits.way_en dataReadArb.io.in(0).bits.valid := widthMap(w => (w == 0).B) // ----------- // MSHR Meta read val mshr_read_req = Wire(Vec(memWidth, new BoomDCacheReq)) mshr_read_req := DontCare mshr_read_req(0).uop := NullMicroOp mshr_read_req(0).addr := Cat(mshrs.io.meta_read.bits.tag, mshrs.io.meta_read.bits.idx) << blockOffBits mshr_read_req(0).data := DontCare mshr_read_req(0).is_hella := false.B metaReadArb.io.in(3).valid := mshrs.io.meta_read.valid metaReadArb.io.in(3).bits.req(0) := mshrs.io.meta_read.bits mshrs.io.meta_read.ready := metaReadArb.io.in(3).ready // ----------- // Write-backs val wb_fire = wb.io.meta_read.fire && wb.io.data_req.fire val wb_req = Wire(Vec(memWidth, new BoomDCacheReq)) wb_req := DontCare wb_req(0).uop := NullMicroOp wb_req(0).addr := Cat(wb.io.meta_read.bits.tag, wb.io.data_req.bits.addr) wb_req(0).data := DontCare wb_req(0).is_hella := false.B // Couple the two decoupled interfaces of the WBUnit's meta_read and data_read // Tag read for write-back metaReadArb.io.in(2).valid := wb.io.meta_read.valid metaReadArb.io.in(2).bits.req(0) := wb.io.meta_read.bits wb.io.meta_read.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready // Data read for write-back dataReadArb.io.in(1).valid := wb.io.data_req.valid dataReadArb.io.in(1).bits.req(0) := wb.io.data_req.bits dataReadArb.io.in(1).bits.valid := widthMap(w => (w == 0).B) wb.io.data_req.ready := metaReadArb.io.in(2).ready && dataReadArb.io.in(1).ready assert(!(wb.io.meta_read.fire ^ wb.io.data_req.fire)) // ------- // Prober val prober_fire = prober.io.meta_read.fire val prober_req = 
Wire(Vec(memWidth, new BoomDCacheReq)) prober_req := DontCare prober_req(0).uop := NullMicroOp prober_req(0).addr := Cat(prober.io.meta_read.bits.tag, prober.io.meta_read.bits.idx) << blockOffBits prober_req(0).data := DontCare prober_req(0).is_hella := false.B // Tag read for prober metaReadArb.io.in(1).valid := prober.io.meta_read.valid metaReadArb.io.in(1).bits.req(0) := prober.io.meta_read.bits prober.io.meta_read.ready := metaReadArb.io.in(1).ready // Prober does not need to read data array // ------- // Prefetcher val prefetch_fire = mshrs.io.prefetch.fire val prefetch_req = Wire(Vec(memWidth, new BoomDCacheReq)) prefetch_req := DontCare prefetch_req(0) := mshrs.io.prefetch.bits // Tag read for prefetch metaReadArb.io.in(5).valid := mshrs.io.prefetch.valid metaReadArb.io.in(5).bits.req(0).idx := mshrs.io.prefetch.bits.addr >> blockOffBits metaReadArb.io.in(5).bits.req(0).way_en := DontCare metaReadArb.io.in(5).bits.req(0).tag := DontCare mshrs.io.prefetch.ready := metaReadArb.io.in(5).ready // Prefetch does not need to read data array val s0_valid = Mux(io.lsu.req.fire, VecInit(io.lsu.req.bits.map(_.valid)), Mux(mshrs.io.replay.fire || wb_fire || prober_fire || prefetch_fire || mshrs.io.meta_read.fire, VecInit(1.U(memWidth.W).asBools), VecInit(0.U(memWidth.W).asBools))) val s0_req = Mux(io.lsu.req.fire , VecInit(io.lsu.req.bits.map(_.bits)), Mux(wb_fire , wb_req, Mux(prober_fire , prober_req, Mux(prefetch_fire , prefetch_req, Mux(mshrs.io.meta_read.fire, mshr_read_req , replay_req))))) val s0_type = Mux(io.lsu.req.fire , t_lsu, Mux(wb_fire , t_wb, Mux(prober_fire , t_probe, Mux(prefetch_fire , t_prefetch, Mux(mshrs.io.meta_read.fire, t_mshr_meta_read , t_replay))))) // Does this request need to send a response or nack val s0_send_resp_or_nack = Mux(io.lsu.req.fire, s0_valid, VecInit(Mux(mshrs.io.replay.fire && isRead(mshrs.io.replay.bits.uop.mem_cmd), 1.U(memWidth.W), 0.U(memWidth.W)).asBools)) val s1_req = RegNext(s0_req) for (w <- 0 until memWidth) s1_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s0_req(w).uop) val s2_store_failed = Wire(Bool()) val s1_valid = widthMap(w => RegNext(s0_valid(w) && !IsKilledByBranch(io.lsu.brupdate, s0_req(w).uop) && !(io.lsu.exception && s0_req(w).uop.uses_ldq) && !(s2_store_failed && io.lsu.req.fire && s0_req(w).uop.uses_stq), init=false.B)) for (w <- 0 until memWidth) assert(!(io.lsu.s1_kill(w) && !RegNext(io.lsu.req.fire) && !RegNext(io.lsu.req.bits(w).valid))) val s1_addr = s1_req.map(_.addr) val s1_nack = s1_addr.map(a => a(idxMSB,idxLSB) === prober.io.meta_write.bits.idx && !prober.io.req.ready) val s1_send_resp_or_nack = RegNext(s0_send_resp_or_nack) val s1_type = RegNext(s0_type) val s1_mshr_meta_read_way_en = RegNext(mshrs.io.meta_read.bits.way_en) val s1_replay_way_en = RegNext(mshrs.io.replay.bits.way_en) // For replays, the metadata isn't written yet val s1_wb_way_en = RegNext(wb.io.data_req.bits.way_en) // tag check def wayMap[T <: Data](f: Int => T) = VecInit((0 until nWays).map(f)) val s1_tag_eq_way = widthMap(i => wayMap((w: Int) => meta(i).io.resp(w).tag === (s1_addr(i) >> untagBits)).asUInt) val s1_tag_match_way = widthMap(i => Mux(s1_type === t_replay, s1_replay_way_en, Mux(s1_type === t_wb, s1_wb_way_en, Mux(s1_type === t_mshr_meta_read, s1_mshr_meta_read_way_en, wayMap((w: Int) => s1_tag_eq_way(i)(w) && meta(i).io.resp(w).coh.isValid()).asUInt)))) val s1_wb_idx_matches = widthMap(i => (s1_addr(i)(untagBits-1,blockOffBits) === wb.io.idx.bits) && wb.io.idx.valid) val s2_req = RegNext(s1_req) val s2_type = RegNext(s1_type) 
val s2_valid = widthMap(w => RegNext(s1_valid(w) && !io.lsu.s1_kill(w) && !IsKilledByBranch(io.lsu.brupdate, s1_req(w).uop) && !(io.lsu.exception && s1_req(w).uop.uses_ldq) && !(s2_store_failed && (s1_type === t_lsu) && s1_req(w).uop.uses_stq))) for (w <- 0 until memWidth) s2_req(w).uop.br_mask := GetNewBrMask(io.lsu.brupdate, s1_req(w).uop) val s2_tag_match_way = RegNext(s1_tag_match_way) val s2_tag_match = s2_tag_match_way.map(_.orR) val s2_hit_state = widthMap(i => Mux1H(s2_tag_match_way(i), wayMap((w: Int) => RegNext(meta(i).io.resp(w).coh)))) val s2_has_permission = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._1) val s2_new_hit_state = widthMap(w => s2_hit_state(w).onAccess(s2_req(w).uop.mem_cmd)._3) val s2_hit = widthMap(w => (s2_tag_match(w) && s2_has_permission(w) && s2_hit_state(w) === s2_new_hit_state(w) && !mshrs.io.block_hit(w)) || s2_type.isOneOf(t_replay, t_wb)) val s2_nack = Wire(Vec(memWidth, Bool())) assert(!(s2_type === t_replay && !s2_hit(0)), "Replays should always hit") assert(!(s2_type === t_wb && !s2_hit(0)), "Writeback should always see data hit") val s2_wb_idx_matches = RegNext(s1_wb_idx_matches) // lr/sc val debug_sc_fail_addr = RegInit(0.U) val debug_sc_fail_cnt = RegInit(0.U(8.W)) val lrsc_count = RegInit(0.U(log2Ceil(lrscCycles).W)) val lrsc_valid = lrsc_count > lrscBackoff.U val lrsc_addr = Reg(UInt()) val s2_lr = s2_req(0).uop.mem_cmd === M_XLR && (!RegNext(s1_nack(0)) || s2_type === t_replay) val s2_sc = s2_req(0).uop.mem_cmd === M_XSC && (!RegNext(s1_nack(0)) || s2_type === t_replay) val s2_lrsc_addr_match = widthMap(w => lrsc_valid && lrsc_addr === (s2_req(w).addr >> blockOffBits)) val s2_sc_fail = s2_sc && !s2_lrsc_addr_match(0) when (lrsc_count > 0.U) { lrsc_count := lrsc_count - 1.U } when (s2_valid(0) && ((s2_type === t_lsu && s2_hit(0) && !s2_nack(0)) || (s2_type === t_replay && s2_req(0).uop.mem_cmd =/= M_FLUSH_ALL))) { when (s2_lr) { lrsc_count := (lrscCycles - 1).U lrsc_addr := s2_req(0).addr >> blockOffBits } when (lrsc_count > 0.U) { lrsc_count := 0.U } } for (w <- 0 until memWidth) { when (s2_valid(w) && s2_type === t_lsu && !s2_hit(w) && !(s2_has_permission(w) && s2_tag_match(w)) && s2_lrsc_addr_match(w) && !s2_nack(w)) { lrsc_count := 0.U } } when (s2_valid(0)) { when (s2_req(0).addr === debug_sc_fail_addr) { when (s2_sc_fail) { debug_sc_fail_cnt := debug_sc_fail_cnt + 1.U } .elsewhen (s2_sc) { debug_sc_fail_cnt := 0.U } } .otherwise { when (s2_sc_fail) { debug_sc_fail_addr := s2_req(0).addr debug_sc_fail_cnt := 1.U } } } assert(debug_sc_fail_cnt < 100.U, "L1DCache failed too many SCs in a row") val s2_data = Wire(Vec(memWidth, Vec(nWays, UInt(encRowBits.W)))) for (i <- 0 until memWidth) { for (w <- 0 until nWays) { s2_data(i)(w) := data.io.resp(i)(w) } } val s2_data_muxed = widthMap(w => Mux1H(s2_tag_match_way(w), s2_data(w))) val s2_word_idx = widthMap(w => if (rowWords == 1) 0.U else s2_req(w).addr(log2Up(rowWords*wordBytes)-1, log2Up(wordBytes))) // replacement policy val replacer = cacheParams.replacement val s1_replaced_way_en = UIntToOH(replacer.way) val s2_replaced_way_en = UIntToOH(RegNext(replacer.way)) val s2_repl_meta = widthMap(i => Mux1H(s2_replaced_way_en, wayMap((w: Int) => RegNext(meta(i).io.resp(w))).toSeq)) // nack because of incoming probe val s2_nack_hit = RegNext(VecInit(s1_nack)) // Nack when we hit something currently being evicted val s2_nack_victim = widthMap(w => s2_valid(w) && s2_hit(w) && mshrs.io.secondary_miss(w)) // MSHRs not ready for request val s2_nack_miss = widthMap(w => s2_valid(w) && 
!s2_hit(w) && !mshrs.io.req(w).ready) // Bank conflict on data arrays val s2_nack_data = widthMap(w => data.io.nacks(w)) // Can't allocate MSHR for same set currently being written back val s2_nack_wb = widthMap(w => s2_valid(w) && !s2_hit(w) && s2_wb_idx_matches(w)) s2_nack := widthMap(w => (s2_nack_miss(w) || s2_nack_hit(w) || s2_nack_victim(w) || s2_nack_data(w) || s2_nack_wb(w)) && s2_type =/= t_replay) val s2_send_resp = widthMap(w => (RegNext(s1_send_resp_or_nack(w)) && !s2_nack(w) && (s2_hit(w) || (mshrs.io.req(w).fire && isWrite(s2_req(w).uop.mem_cmd) && !isRead(s2_req(w).uop.mem_cmd))))) val s2_send_nack = widthMap(w => (RegNext(s1_send_resp_or_nack(w)) && s2_nack(w))) for (w <- 0 until memWidth) assert(!(s2_send_resp(w) && s2_send_nack(w))) // hits always send a response // If MSHR is not available, LSU has to replay this request later // If MSHR is available and this is only a store(not a amo), we don't need to wait for resp later s2_store_failed := s2_valid(0) && s2_nack(0) && s2_send_nack(0) && s2_req(0).uop.uses_stq // Miss handling for (w <- 0 until memWidth) { mshrs.io.req(w).valid := s2_valid(w) && !s2_hit(w) && !s2_nack_hit(w) && !s2_nack_victim(w) && !s2_nack_data(w) && !s2_nack_wb(w) && s2_type.isOneOf(t_lsu, t_prefetch) && !IsKilledByBranch(io.lsu.brupdate, s2_req(w).uop) && !(io.lsu.exception && s2_req(w).uop.uses_ldq) && (isPrefetch(s2_req(w).uop.mem_cmd) || isRead(s2_req(w).uop.mem_cmd) || isWrite(s2_req(w).uop.mem_cmd)) assert(!(mshrs.io.req(w).valid && s2_type === t_replay), "Replays should not need to go back into MSHRs") mshrs.io.req(w).bits := DontCare mshrs.io.req(w).bits.uop := s2_req(w).uop mshrs.io.req(w).bits.uop.br_mask := GetNewBrMask(io.lsu.brupdate, s2_req(w).uop) mshrs.io.req(w).bits.addr := s2_req(w).addr mshrs.io.req(w).bits.tag_match := s2_tag_match(w) mshrs.io.req(w).bits.old_meta := Mux(s2_tag_match(w), L1Metadata(s2_repl_meta(w).tag, s2_hit_state(w)), s2_repl_meta(w)) mshrs.io.req(w).bits.way_en := Mux(s2_tag_match(w), s2_tag_match_way(w), s2_replaced_way_en) mshrs.io.req(w).bits.data := s2_req(w).data mshrs.io.req(w).bits.is_hella := s2_req(w).is_hella mshrs.io.req_is_probe(w) := s2_type === t_probe && s2_valid(w) } mshrs.io.meta_resp.valid := !s2_nack_hit(0) || prober.io.mshr_wb_rdy mshrs.io.meta_resp.bits := Mux1H(s2_tag_match_way(0), RegNext(meta(0).io.resp)) when (mshrs.io.req.map(_.fire).reduce(_||_)) { replacer.miss } tl_out.a <> mshrs.io.mem_acquire // probes and releases prober.io.req.valid := tl_out.b.valid && !lrsc_valid tl_out.b.ready := prober.io.req.ready && !lrsc_valid prober.io.req.bits := tl_out.b.bits prober.io.way_en := s2_tag_match_way(0) prober.io.block_state := s2_hit_state(0) metaWriteArb.io.in(1) <> prober.io.meta_write prober.io.mshr_rdy := mshrs.io.probe_rdy prober.io.wb_rdy := (prober.io.meta_write.bits.idx =/= wb.io.idx.bits) || !wb.io.idx.valid mshrs.io.prober_state := prober.io.state // refills when (tl_out.d.bits.source === cfg.nMSHRs.U) { // This should be ReleaseAck tl_out.d.ready := true.B mshrs.io.mem_grant.valid := false.B mshrs.io.mem_grant.bits := DontCare } .otherwise { // This should be GrantData mshrs.io.mem_grant <> tl_out.d } dataWriteArb.io.in(1) <> mshrs.io.refill metaWriteArb.io.in(0) <> mshrs.io.meta_write tl_out.e <> mshrs.io.mem_finish // writebacks val wbArb = Module(new Arbiter(new WritebackReq(edge.bundle), 2)) // 0 goes to prober, 1 goes to MSHR evictions wbArb.io.in(0) <> prober.io.wb_req wbArb.io.in(1) <> mshrs.io.wb_req wb.io.req <> wbArb.io.out wb.io.data_resp := s2_data_muxed(0) 
mshrs.io.wb_resp := wb.io.resp wb.io.mem_grant := tl_out.d.fire && tl_out.d.bits.source === cfg.nMSHRs.U val lsu_release_arb = Module(new Arbiter(new TLBundleC(edge.bundle), 2)) io.lsu.release <> lsu_release_arb.io.out lsu_release_arb.io.in(0) <> wb.io.lsu_release lsu_release_arb.io.in(1) <> prober.io.lsu_release TLArbiter.lowest(edge, tl_out.c, wb.io.release, prober.io.rep) io.lsu.perf.release := edge.done(tl_out.c) io.lsu.perf.acquire := edge.done(tl_out.a) // load data gen val s2_data_word_prebypass = widthMap(w => s2_data_muxed(w) >> Cat(s2_word_idx(w), 0.U(log2Ceil(coreDataBits).W))) val s2_data_word = Wire(Vec(memWidth, UInt())) val loadgen = (0 until memWidth).map { w => new LoadGen(s2_req(w).uop.mem_size, s2_req(w).uop.mem_signed, s2_req(w).addr, s2_data_word(w), s2_sc && (w == 0).B, wordBytes) } // Mux between cache responses and uncache responses val cache_resp = Wire(Vec(memWidth, Valid(new BoomDCacheResp))) for (w <- 0 until memWidth) { cache_resp(w).valid := s2_valid(w) && s2_send_resp(w) cache_resp(w).bits.uop := s2_req(w).uop cache_resp(w).bits.data := loadgen(w).data | s2_sc_fail cache_resp(w).bits.is_hella := s2_req(w).is_hella } val uncache_resp = Wire(Valid(new BoomDCacheResp)) uncache_resp.bits := mshrs.io.resp.bits uncache_resp.valid := mshrs.io.resp.valid mshrs.io.resp.ready := !(cache_resp.map(_.valid).reduce(_&&_)) // We can backpressure the MSHRs, but not cache hits val resp = WireInit(cache_resp) var uncache_responding = false.B for (w <- 0 until memWidth) { val uncache_respond = !cache_resp(w).valid && !uncache_responding when (uncache_respond) { resp(w) := uncache_resp } uncache_responding = uncache_responding || uncache_respond } for (w <- 0 until memWidth) { io.lsu.resp(w).valid := resp(w).valid && !(io.lsu.exception && resp(w).bits.uop.uses_ldq) && !IsKilledByBranch(io.lsu.brupdate, resp(w).bits.uop) io.lsu.resp(w).bits := UpdateBrMask(io.lsu.brupdate, resp(w).bits) io.lsu.nack(w).valid := s2_valid(w) && s2_send_nack(w) && !(io.lsu.exception && s2_req(w).uop.uses_ldq) && !IsKilledByBranch(io.lsu.brupdate, s2_req(w).uop) io.lsu.nack(w).bits := UpdateBrMask(io.lsu.brupdate, s2_req(w)) assert(!(io.lsu.nack(w).valid && s2_type =/= t_lsu)) } // Store/amo hits val s3_req = RegNext(s2_req(0)) val s3_valid = RegNext(s2_valid(0) && s2_hit(0) && isWrite(s2_req(0).uop.mem_cmd) && !s2_sc_fail && !(s2_send_nack(0) && s2_nack(0))) for (w <- 1 until memWidth) { assert(!(s2_valid(w) && s2_hit(w) && isWrite(s2_req(w).uop.mem_cmd) && !s2_sc_fail && !(s2_send_nack(w) && s2_nack(w))), "Store must go through 0th pipe in L1D") } // For bypassing val s4_req = RegNext(s3_req) val s4_valid = RegNext(s3_valid) val s5_req = RegNext(s4_req) val s5_valid = RegNext(s4_valid) val s3_bypass = widthMap(w => s3_valid && ((s2_req(w).addr >> wordOffBits) === (s3_req.addr >> wordOffBits))) val s4_bypass = widthMap(w => s4_valid && ((s2_req(w).addr >> wordOffBits) === (s4_req.addr >> wordOffBits))) val s5_bypass = widthMap(w => s5_valid && ((s2_req(w).addr >> wordOffBits) === (s5_req.addr >> wordOffBits))) // Store -> Load bypassing for (w <- 0 until memWidth) { s2_data_word(w) := Mux(s3_bypass(w), s3_req.data, Mux(s4_bypass(w), s4_req.data, Mux(s5_bypass(w), s5_req.data, s2_data_word_prebypass(w)))) } val amoalu = Module(new AMOALU(xLen)) amoalu.io.mask := new StoreGen(s2_req(0).uop.mem_size, s2_req(0).addr, 0.U, xLen/8).mask amoalu.io.cmd := s2_req(0).uop.mem_cmd amoalu.io.lhs := s2_data_word(0) amoalu.io.rhs := s2_req(0).data s3_req.data := amoalu.io.out val s3_way = 
RegNext(s2_tag_match_way(0)) dataWriteArb.io.in(0).valid := s3_valid dataWriteArb.io.in(0).bits.addr := s3_req.addr dataWriteArb.io.in(0).bits.wmask := UIntToOH(s3_req.addr.extract(rowOffBits-1,offsetlsb)) dataWriteArb.io.in(0).bits.data := Fill(rowWords, s3_req.data) dataWriteArb.io.in(0).bits.way_en := s3_way io.lsu.ordered := mshrs.io.fence_rdy && !s1_valid.reduce(_||_) && !s2_valid.reduce(_||_) } File Replacement.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import freechips.rocketchip.util.property.cover abstract class ReplacementPolicy { def nBits: Int def perSet: Boolean def way: UInt def miss: Unit def hit: Unit def access(touch_way: UInt): Unit def access(touch_ways: Seq[Valid[UInt]]): Unit def state_read: UInt def get_next_state(state: UInt, touch_way: UInt): UInt def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = { touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev)) } def get_replace_way(state: UInt): UInt } object ReplacementPolicy { def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match { case "random" => new RandomReplacement(n_ways) case "lru" => new TrueLRU(n_ways) case "plru" => new PseudoLRU(n_ways) case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t") } } class RandomReplacement(n_ways: Int) extends ReplacementPolicy { private val replace = Wire(Bool()) replace := false.B def nBits = 16 def perSet = false private val lfsr = LFSR(nBits, replace) def state_read = WireDefault(lfsr) def way = Random(n_ways, lfsr) def miss = replace := true.B def hit = {} def access(touch_way: UInt) = {} def access(touch_ways: Seq[Valid[UInt]]) = {} def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare def get_replace_way(state: UInt) = way } abstract class SeqReplacementPolicy { def access(set: UInt): Unit def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit def way: UInt } abstract class SetAssocReplacementPolicy { def access(set: UInt, touch_way: UInt): Unit def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit def way(set: UInt): UInt } class SeqRandom(n_ways: Int) extends SeqReplacementPolicy { val logic = new RandomReplacement(n_ways) def access(set: UInt) = { } def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = { when (valid && !hit) { logic.miss } } def way = logic.way } class TrueLRU(n_ways: Int) extends ReplacementPolicy { // True LRU replacement policy, using a triangular matrix to track which sets are more recently used than others. // The matrix is packed into a single UInt (or Bits). 
Example 4-way (6-bits): // [5] - 3 more recent than 2 // [4] - 3 more recent than 1 // [3] - 2 more recent than 1 // [2] - 3 more recent than 0 // [1] - 2 more recent than 0 // [0] - 1 more recent than 0 def nBits = (n_ways * (n_ways-1)) / 2 def perSet = true private val state_reg = RegInit(0.U(nBits.W)) def state_read = WireDefault(state_reg) private def extractMRUVec(state: UInt): Seq[UInt] = { // Extract per-way information about which higher-indexed ways are more recently used val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W))) var lsb = 0 for (i <- 0 until n_ways-1) { moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W)) lsb = lsb + (n_ways - i - 1) } moreRecentVec } def get_next_state(state: UInt, touch_way: UInt): UInt = { val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W))) val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix val wayDec = UIntToOH(touch_way, n_ways) // Compute next value of triangular matrix // set the touched way as more recent than every other way nextState.zipWithIndex.map { case (e, i) => e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec) } nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1 } def access(touch_way: UInt): Unit = { state_reg := get_next_state(state_reg, touch_way) } def access(touch_ways: Seq[Valid[UInt]]): Unit = { when (touch_ways.map(_.valid).orR) { state_reg := get_next_state(state_reg, touch_ways) } for (i <- 1 until touch_ways.size) { cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous") } } def get_replace_way(state: UInt): UInt = { val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix // For each way, determine if all other ways are more recent val mruWayDec = (0 until n_ways).map { i => val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR) val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _)) upperMoreRecent && lowerMoreRecent } OHToUInt(mruWayDec) } def way = get_replace_way(state_reg) def miss = access(way) def hit = {} @deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05") def replace: UInt = way } class PseudoLRU(n_ways: Int) extends ReplacementPolicy { // Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU // // // - bits storage example for 4-way PLRU binary tree: // bit[2]: ways 3+2 older than ways 1+0 // / \ // bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0 // // // - bits storage example for 3-way PLRU binary tree: // bit[1]: way 2 older than ways 1+0 // \ // bit[0]: way 1 older than way 0 // // // - bits storage example for 8-way PLRU binary tree: // bit[6]: ways 7-4 older than ways 3-0 // / \ // bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0 // / \ / \ // bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0 def nBits = n_ways - 1 def perSet = true private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W)) def state_read = WireDefault(state_reg) def access(touch_way: UInt): Unit = { state_reg := get_next_state(state_reg, touch_way) } def access(touch_ways: Seq[Valid[UInt]]): Unit = { when (touch_ways.map(_.valid).orR) { state_reg := get_next_state(state_reg, touch_ways) } for (i <- 1 until touch_ways.size) { cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i 
simultaneous") } } /** @param state state_reg bits for this sub-tree * @param touch_way touched way encoded value bits for this sub-tree * @param tree_nways number of ways in this sub-tree */ def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = { require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways") require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways") if (tree_nways > 2) { // we are at a branching node in the tree, so recurse val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree val set_left_older = !touch_way(log2Ceil(tree_nways)-1) val left_subtree_state = state.extract(tree_nways-3, right_nways-1) val right_subtree_state = state(right_nways-2, 0) if (left_nways > 1) { // we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees Cat(set_left_older, Mux(set_left_older, left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer Mux(set_left_older, get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree } else { // we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree Cat(set_left_older, Mux(set_left_older, get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree } } else if (tree_nways == 2) { // we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value !touch_way(0) } else { // tree_nways <= 1 // we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires) 0.U(1.W) } } def get_next_state(state: UInt, touch_way: UInt): UInt = { val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo (log2Ceil(n_ways)) else touch_way.extract(log2Ceil(n_ways)-1,0) get_next_state(state, touch_way_sized, n_ways) } /** @param state state_reg bits for this sub-tree * @param tree_nways number of ways in this sub-tree */ def get_replace_way(state: UInt, tree_nways: Int): UInt = { require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways") // this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb if (tree_nways > 2) { // we are at a branching node in the tree, so recurse val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree val left_subtree_older = state(tree_nways-2) val left_subtree_state = state.extract(tree_nways-3, right_nways-1) val right_subtree_state = state(right_nways-2, 0) if (left_nways > 1) { // we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value 
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right get_replace_way(left_subtree_state, left_nways), // recurse left get_replace_way(right_subtree_state, right_nways))) // recurse right } else { // we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right 0.U(1.W), get_replace_way(right_subtree_state, right_nways))) // recurse right } } else if (tree_nways == 2) { // we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value state(0) } else { // tree_nways <= 1 // we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value 0.U(1.W) } } def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways) def way = get_replace_way(state_reg) def miss = access(way) def hit = {} } class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy { val logic = new PseudoLRU(n_ways) val state = SyncReadMem(n_sets, UInt(logic.nBits.W)) val current_state = Wire(UInt(logic.nBits.W)) val next_state = Wire(UInt(logic.nBits.W)) val plru_way = logic.get_replace_way(current_state) def access(set: UInt) = { current_state := state.read(set) } def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = { val update_way = Mux(hit, way, plru_way) next_state := logic.get_next_state(current_state, update_way) when (valid) { state.write(set, next_state) } } def way = plru_way } class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy { val logic = policy.toLowerCase match { case "plru" => new PseudoLRU(n_ways) case "lru" => new TrueLRU(n_ways) case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t") } val state_vec = if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W)))) def access(set: UInt, touch_way: UInt) = { state_vec(set) := logic.get_next_state(state_vec(set), touch_way) } def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = { require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways") for (set <- 0 until n_sets) { val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) => Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)} when (set_touch_ways.map(_.valid).orR) { state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways) } } } def way(set: UInt) = logic.get_replace_way(state_vec(set)) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) { val plru = new PseudoLRU(n_ways) // step io.finished := RegNext(true.B, false.B) val get_replace_ways = (0 until (1 << (n_ways-1))).map(state => plru.get_replace_way(state = state.U((n_ways-1).W))) val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way => plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W)))) n_ways match { case 2 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) 
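      // Editorial note (illustrative, not part of the original test): with n_ways=2 the PLRU state is a
      // single bit meaning "way 1 is older than way 0", so get_replace_way(state) is simply state(0) and
      // get_next_state(state, touch_way) is !touch_way(0): touching way 0 yields state=1 (replace way 1 next),
      // touching way 1 yields state=0 (replace way 0 next), which is exactly what the surrounding asserts check.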
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1)) assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1)) } case 3 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2)) assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3)) assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1)) assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2)) assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1)) assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2)) assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0)) assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1)) assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2)) assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0)) assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1)) assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2)) } case 4 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2)) assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3)) assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4)) assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5)) assert(get_replace_ways(6) === 
3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6)) assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7)) assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1)) assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2)) assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3)) assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1)) assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2)) assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3)) assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0)) assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1)) assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2)) assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3)) assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0)) assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1)) assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2)) assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3)) assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0)) assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1)) assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2)) assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3)) assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0)) assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1)) assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2)) assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3)) assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0)) assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 
way=1: expected=6 actual=%d", get_next_states(6)(1)) assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2)) assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3)) assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0)) assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=5: expected=6 actual=%d", get_next_states(7)(1)) assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2)) assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3)) } case 5 => { assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0)) assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1)) assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2)) assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3)) assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4)) assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5)) assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6)) assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7)) assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8)) assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9)) assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10)) assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11)) assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12)) assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13)) assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14)) assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15)) assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0)) assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1)) assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2)) assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3)) assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: 
expected=00 actual=%d", get_next_states( 0)(4)) assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0)) assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1)) assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2)) assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3)) assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4)) assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0)) assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1)) assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2)) assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3)) assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4)) assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0)) assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1)) assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2)) assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3)) assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4)) assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0)) assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1)) assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2)) assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3)) assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4)) assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0)) assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1)) assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2)) assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3)) assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4)) assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0)) 
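      // Editorial note (illustrative, not part of the original test): n_ways=5 gives an unbalanced tree --
      // the right sub-tree is ways 3..0 on state(2,0) and the left sub-tree is the lone way 4, selected by
      // state(3). That is why every get_replace_way check for states 8..15 above expects way 4, while
      // states 0..7 recurse into the 4-way sub-tree and reproduce the n_ways=4 pattern (0,1,0,1,2,2,3,3).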
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1)) assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2)) assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3)) assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4)) assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0)) assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=5: expected=14 actual=%d", get_next_states( 7)(1)) assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2)) assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3)) assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4)) assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0)) assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1)) assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2)) assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3)) assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4)) assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0)) assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1)) assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2)) assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3)) assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4)) assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0)) assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1)) assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2)) assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3)) assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4)) assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0)) assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1)) assert(get_next_states(11)(2) === 11.U(plru.nBits.W), 
s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2)) assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3)) assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4)) assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0)) assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1)) assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2)) assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3)) assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4)) assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0)) assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1)) assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2)) assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3)) assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4)) assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0)) assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1)) assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2)) assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3)) assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4)) assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0)) assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=5: expected=14 actual=%d", get_next_states(15)(1)) assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2)) assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3)) assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4)) } case 6 => { assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0)) assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1)) assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2)) assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", 
get_replace_ways( 3)) assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4)) assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5)) assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6)) assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7)) assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8)) assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9)) assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10)) assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11)) assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12)) assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13)) assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14)) assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15)) assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16)) assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17)) assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18)) assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19)) assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20)) assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21)) assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22)) assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23)) assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24)) assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25)) assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26)) assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27)) assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28)) assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29)) assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30)) 
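      // Editorial note (illustrative, not part of the original test): for n_ways=6 the right sub-tree is
      // ways 3..0 (state(2,0)) and the left sub-tree is ways 5+4 (state(3)), with state(4) choosing between
      // them. Hence states 16..23 (state(4)=1, state(3)=0) replace way 4 and states 24..31 replace way 5,
      // matching the checks above and below.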
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31)) } case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways") } } File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
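    // Editorial note (illustrative): writing pc & ~(b-1).U would AND against a literal only log2(b) bits
    // wide, zeroing every pc bit above that width; the double inversion below keeps pc's full width, so
    // e.g. AlignPCToBoundary(pc, 8) clears only pc(2,0) and leaves the upper address bits intact.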
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File Consts.scala: // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket.constants import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ trait ScalarOpConstants { val SZ_BR = 3 def BR_X = BitPat("b???") def BR_EQ = 0.U(3.W) def BR_NE = 1.U(3.W) def BR_J = 2.U(3.W) def BR_N = 3.U(3.W) def BR_LT = 4.U(3.W) def BR_GE = 5.U(3.W) def BR_LTU = 6.U(3.W) def BR_GEU = 7.U(3.W) def A1_X = BitPat("b??") def A1_ZERO = 0.U(2.W) def A1_RS1 = 1.U(2.W) def A1_PC = 2.U(2.W) def A1_RS1SHL = 3.U(2.W) def IMM_X = BitPat("b???") def IMM_S = 0.U(3.W) def IMM_SB = 1.U(3.W) def IMM_U = 2.U(3.W) def IMM_UJ = 3.U(3.W) def IMM_I = 4.U(3.W) def IMM_Z = 5.U(3.W) def A2_X = BitPat("b???") def A2_ZERO = 0.U(3.W) def A2_SIZE = 1.U(3.W) def A2_RS2 = 2.U(3.W) def A2_IMM = 3.U(3.W) def A2_RS2OH = 4.U(3.W) def A2_IMMOH = 5.U(3.W) def X = BitPat("b?") def N = BitPat("b0") def Y = BitPat("b1") val SZ_DW = 1 def DW_X = X def DW_32 = false.B def DW_64 = true.B def DW_XPR = DW_64 } trait MemoryOpConstants { val NUM_XA_OPS = 9 val M_SZ = 5 def M_X = BitPat("b?????"); def M_XRD = "b00000".U; // int load def M_XWR = "b00001".U; // int store def M_PFR = "b00010".U; // prefetch with intent to read def M_PFW = "b00011".U; // prefetch with intent to write def M_XA_SWAP = "b00100".U def M_FLUSH_ALL = "b00101".U // flush all lines def M_XLR = "b00110".U def M_XSC = "b00111".U def M_XA_ADD = "b01000".U def M_XA_XOR = "b01001".U def M_XA_OR = "b01010".U def M_XA_AND = "b01011".U def M_XA_MIN = "b01100".U def M_XA_MAX = "b01101".U def M_XA_MINU = "b01110".U def M_XA_MAXU = "b01111".U def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions def M_PWR = "b10001".U // partial (masked) store def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions def M_SFENCE = "b10100".U // SFENCE.VMA def M_HFENCEV = "b10101".U // HFENCE.VVMA def M_HFENCEG = "b10110".U // HFENCE.GVMA def M_WOK = "b10111".U // check write permissions but don't perform a write def M_HLVX = "b10000".U // HLVX instruction def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND) def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU) def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd) def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd) def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd) def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. 
*/ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. 
val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File consts.scala: //****************************************************************************** // Copyright (c) 2011 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. 
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Constants //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common.constants import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util.Str import freechips.rocketchip.rocket.RVCExpander /** * Mixin for issue queue types */ trait IQType { val IQT_SZ = 3 val IQT_INT = 1.U(IQT_SZ.W) val IQT_MEM = 2.U(IQT_SZ.W) val IQT_FP = 4.U(IQT_SZ.W) val IQT_MFP = 6.U(IQT_SZ.W) } /** * Mixin for scalar operation constants */ trait ScalarOpConstants { val X = BitPat("b?") val Y = BitPat("b1") val N = BitPat("b0") //************************************ // Extra Constants // Which branch predictor predicted us val BSRC_SZ = 2 val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution //************************************ // Control Signals // CFI types val CFI_SZ = 3 val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction val CFI_BR = 1.U(CFI_SZ.W) // Branch val CFI_JAL = 2.U(CFI_SZ.W) // JAL val CFI_JALR = 3.U(CFI_SZ.W) // JALR // PC Select Signal val PC_PLUS4 = 0.U(2.W) // PC + 4 val PC_BRJMP = 1.U(2.W) // brjmp_target val PC_JALR = 2.U(2.W) // jump_reg_target // Branch Type val BR_N = 0.U(4.W) // Next val BR_NE = 1.U(4.W) // Branch on NotEqual val BR_EQ = 2.U(4.W) // Branch on Equal val BR_GE = 3.U(4.W) // Branch on Greater/Equal val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned val BR_LT = 5.U(4.W) // Branch on Less Than val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned val BR_J = 7.U(4.W) // Jump val BR_JR = 8.U(4.W) // Jump Register // RS1 Operand Select Signal val OP1_RS1 = 0.U(2.W) // Register Source #1 val OP1_ZERO= 1.U(2.W) val OP1_PC = 2.U(2.W) val OP1_X = BitPat("b??") // RS2 Operand Select Signal val OP2_RS2 = 0.U(3.W) // Register Source #2 val OP2_IMM = 1.U(3.W) // immediate val OP2_ZERO= 2.U(3.W) // constant 0 val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4) val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1 val OP2_X = BitPat("b???") // Register File Write Enable Signal val REN_0 = false.B val REN_1 = true.B // Is 32b Word or 64b Doubldword? val SZ_DW = 1 val DW_X = true.B // Bool(xLen==64) val DW_32 = false.B val DW_64 = true.B val DW_XPR = true.B // Bool(xLen==64) // Memory Enable Signal val MEN_0 = false.B val MEN_1 = true.B val MEN_X = false.B // Immediate Extend Select val IS_I = 0.U(3.W) // I-Type (LD,ALU) val IS_S = 1.U(3.W) // S-Type (ST) val IS_B = 2.U(3.W) // SB-Type (BR) val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC) val IS_J = 4.U(3.W) // UJ-Type (J/JAL) val IS_X = BitPat("b???") // Decode Stage Control Signals val RT_FIX = 0.U(2.W) val RT_FLT = 1.U(2.W) val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc) val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.) 
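  // Added sketch (illustrative, not part of the original file): the RT_* encodings above are what
  // decode/rename consult to decide whether a destination needs a physical register; a
  // hypothetical helper could be written as
  //   def rtypeIsReal(rt: UInt): Bool = (rt === RT_FIX) || (rt === RT_FLT)
  // leaving RT_X (no real destination) and RT_PAS (pass-through) outside the busy-bit machinery.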
// TODO rename RT_NAR // Micro-op opcodes // TODO change micro-op opcodes into using enum val UOPC_SZ = 7 val uopX = BitPat.dontCare(UOPC_SZ) val uopNOP = 0.U(UOPC_SZ.W) val uopLD = 1.U(UOPC_SZ.W) val uopSTA = 2.U(UOPC_SZ.W) // store address generation val uopSTD = 3.U(UOPC_SZ.W) // store data generation val uopLUI = 4.U(UOPC_SZ.W) val uopADDI = 5.U(UOPC_SZ.W) val uopANDI = 6.U(UOPC_SZ.W) val uopORI = 7.U(UOPC_SZ.W) val uopXORI = 8.U(UOPC_SZ.W) val uopSLTI = 9.U(UOPC_SZ.W) val uopSLTIU= 10.U(UOPC_SZ.W) val uopSLLI = 11.U(UOPC_SZ.W) val uopSRAI = 12.U(UOPC_SZ.W) val uopSRLI = 13.U(UOPC_SZ.W) val uopSLL = 14.U(UOPC_SZ.W) val uopADD = 15.U(UOPC_SZ.W) val uopSUB = 16.U(UOPC_SZ.W) val uopSLT = 17.U(UOPC_SZ.W) val uopSLTU = 18.U(UOPC_SZ.W) val uopAND = 19.U(UOPC_SZ.W) val uopOR = 20.U(UOPC_SZ.W) val uopXOR = 21.U(UOPC_SZ.W) val uopSRA = 22.U(UOPC_SZ.W) val uopSRL = 23.U(UOPC_SZ.W) val uopBEQ = 24.U(UOPC_SZ.W) val uopBNE = 25.U(UOPC_SZ.W) val uopBGE = 26.U(UOPC_SZ.W) val uopBGEU = 27.U(UOPC_SZ.W) val uopBLT = 28.U(UOPC_SZ.W) val uopBLTU = 29.U(UOPC_SZ.W) val uopCSRRW= 30.U(UOPC_SZ.W) val uopCSRRS= 31.U(UOPC_SZ.W) val uopCSRRC= 32.U(UOPC_SZ.W) val uopCSRRWI=33.U(UOPC_SZ.W) val uopCSRRSI=34.U(UOPC_SZ.W) val uopCSRRCI=35.U(UOPC_SZ.W) val uopJ = 36.U(UOPC_SZ.W) val uopJAL = 37.U(UOPC_SZ.W) val uopJALR = 38.U(UOPC_SZ.W) val uopAUIPC= 39.U(UOPC_SZ.W) //val uopSRET = 40.U(UOPC_SZ.W) val uopCFLSH= 41.U(UOPC_SZ.W) val uopFENCE= 42.U(UOPC_SZ.W) val uopADDIW= 43.U(UOPC_SZ.W) val uopADDW = 44.U(UOPC_SZ.W) val uopSUBW = 45.U(UOPC_SZ.W) val uopSLLIW= 46.U(UOPC_SZ.W) val uopSLLW = 47.U(UOPC_SZ.W) val uopSRAIW= 48.U(UOPC_SZ.W) val uopSRAW = 49.U(UOPC_SZ.W) val uopSRLIW= 50.U(UOPC_SZ.W) val uopSRLW = 51.U(UOPC_SZ.W) val uopMUL = 52.U(UOPC_SZ.W) val uopMULH = 53.U(UOPC_SZ.W) val uopMULHU= 54.U(UOPC_SZ.W) val uopMULHSU=55.U(UOPC_SZ.W) val uopMULW = 56.U(UOPC_SZ.W) val uopDIV = 57.U(UOPC_SZ.W) val uopDIVU = 58.U(UOPC_SZ.W) val uopREM = 59.U(UOPC_SZ.W) val uopREMU = 60.U(UOPC_SZ.W) val uopDIVW = 61.U(UOPC_SZ.W) val uopDIVUW= 62.U(UOPC_SZ.W) val uopREMW = 63.U(UOPC_SZ.W) val uopREMUW= 64.U(UOPC_SZ.W) val uopFENCEI = 65.U(UOPC_SZ.W) // = 66.U(UOPC_SZ.W) val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen) val uopFMV_W_X = 68.U(UOPC_SZ.W) val uopFMV_D_X = 69.U(UOPC_SZ.W) val uopFMV_X_W = 70.U(UOPC_SZ.W) val uopFMV_X_D = 71.U(UOPC_SZ.W) val uopFSGNJ_S = 72.U(UOPC_SZ.W) val uopFSGNJ_D = 73.U(UOPC_SZ.W) val uopFCVT_S_D = 74.U(UOPC_SZ.W) val uopFCVT_D_S = 75.U(UOPC_SZ.W) val uopFCVT_S_X = 76.U(UOPC_SZ.W) val uopFCVT_D_X = 77.U(UOPC_SZ.W) val uopFCVT_X_S = 78.U(UOPC_SZ.W) val uopFCVT_X_D = 79.U(UOPC_SZ.W) val uopCMPR_S = 80.U(UOPC_SZ.W) val uopCMPR_D = 81.U(UOPC_SZ.W) val uopFCLASS_S = 82.U(UOPC_SZ.W) val uopFCLASS_D = 83.U(UOPC_SZ.W) val uopFMINMAX_S = 84.U(UOPC_SZ.W) val uopFMINMAX_D = 85.U(UOPC_SZ.W) // = 86.U(UOPC_SZ.W) val uopFADD_S = 87.U(UOPC_SZ.W) val uopFSUB_S = 88.U(UOPC_SZ.W) val uopFMUL_S = 89.U(UOPC_SZ.W) val uopFADD_D = 90.U(UOPC_SZ.W) val uopFSUB_D = 91.U(UOPC_SZ.W) val uopFMUL_D = 92.U(UOPC_SZ.W) val uopFMADD_S = 93.U(UOPC_SZ.W) val uopFMSUB_S = 94.U(UOPC_SZ.W) val uopFNMADD_S = 95.U(UOPC_SZ.W) val uopFNMSUB_S = 96.U(UOPC_SZ.W) val uopFMADD_D = 97.U(UOPC_SZ.W) val uopFMSUB_D = 98.U(UOPC_SZ.W) val uopFNMADD_D = 99.U(UOPC_SZ.W) val uopFNMSUB_D = 100.U(UOPC_SZ.W) val uopFDIV_S = 101.U(UOPC_SZ.W) val uopFDIV_D = 102.U(UOPC_SZ.W) val uopFSQRT_S = 103.U(UOPC_SZ.W) val uopFSQRT_D = 104.U(UOPC_SZ.W) val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline val uopERET = 
106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET val uopSFENCE = 107.U(UOPC_SZ.W) val uopROCC = 108.U(UOPC_SZ.W) val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2" // The Bubble Instruction (Machine generated NOP) // Insert (XOR x0,x0,x0) which is different from software compiler // generated NOPs which are (ADDI x0, x0, 0). // Reasoning for this is to let visualizers and stat-trackers differentiate // between software NOPs and machine-generated Bubbles in the pipeline. val BUBBLE = (0x4033).U(32.W) def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = { val uop = Wire(new boom.v3.common.MicroOp) uop := DontCare // Overridden in the following lines uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior uop.bypassable := false.B uop.fp_val := false.B uop.uses_stq := false.B uop.uses_ldq := false.B uop.pdst := 0.U uop.dst_rtype := RT_X val cs = Wire(new boom.v3.common.CtrlSignals()) cs := DontCare // Overridden in the following lines cs.br_type := BR_N cs.csr_cmd := freechips.rocketchip.rocket.CSR.N cs.is_load := false.B cs.is_sta := false.B cs.is_std := false.B uop.ctrl := cs uop } } /** * Mixin for RISCV constants */ trait RISCVConstants { // abstract out instruction decode magic numbers val RD_MSB = 11 val RD_LSB = 7 val RS1_MSB = 19 val RS1_LSB = 15 val RS2_MSB = 24 val RS2_LSB = 20 val RS3_MSB = 31 val RS3_LSB = 27 val CSR_ADDR_MSB = 31 val CSR_ADDR_LSB = 20 val CSR_ADDR_SZ = 12 // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.) val SHAMT_5_BIT = 25 val LONGEST_IMM_SZ = 20 val X0 = 0.U val RA = 1.U // return address register // memory consistency model // The C/C++ atomics MCM requires that two loads to the same address maintain program order. // The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior). 
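  // Added note (illustrative, not part of the original source): when the flag below is set, the
  // load/store unit is expected to conservatively keep loads to the same address in program
  // order, which is what the C/C++ atomics model described above requires of the memory system.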
val MCM_ORDER_DEPENDENT_LOADS = true val jal_opc = (0x6f).U val jalr_opc = (0x67).U def GetUop(inst: UInt): UInt = inst(6,0) def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB) def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB) def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = { val rvc_exp = Module(new RVCExpander) rvc_exp.io.in := inst Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst) } // Note: Accepts only EXPANDED rvc instructions def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) ((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) ((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = { val bdecode = Module(new boom.v3.exu.BranchDecode) bdecode.io.inst := inst bdecode.io.pc := 0.U bdecode.io.out.cfi_type } } /** * Mixin for exception cause constants */ trait ExcCauseConstants { // a memory disambigious misspeculation occurred val MINI_EXCEPTION_MEM_ORDERING = 16.U val MINI_EXCEPTION_CSR_REPLAY = 17.U require (!freechips.rocketchip.rocket.Causes.all.contains(16)) require (!freechips.rocketchip.rocket.Causes.all.contains(17)) } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. 
Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
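   *
   * Added note (illustrative, not part of the original source): each outward edge is built from
   * this node's downward-flowing parameters (doParams) together with the upward-flowing
   * parameters reported back by the connected inward node (n.uiParams(i)), so the edge object
   * captures both halves of the negotiation.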
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
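   *
   * Added usage sketch (illustrative, not part of the original source): user code in a
   * [[LazyModuleImp]] normally reaches the negotiated hardware through `in`/`out` rather than
   * through the dangles themselves, e.g. with a hypothetical node called `node`:
   * {{{
   * val (bundle, edge) = node.in(0) // hardware bundle plus its negotiated edge parameters
   * }}}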
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
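  // Added note (illustrative, not part of the original source): the function below keeps a
  // running count of outstanding transactions by adding PopCount of request messages firing
  // their first beat and subtracting PopCount of response messages firing their last beat each
  // cycle; the B/C/E channels only participate when the edge supports Acquire/Probe (bce).
  // A hedged usage sketch, with `edge`, `tl` and `maxOutstanding` as hypothetical names:
  //   val (inflight, _) = edge.inFlight(tl)
  //   assert(inflight <= maxOutstanding.U)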
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File Arbiter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ object TLArbiter { // (valids, select) => readys type Policy = (Integer, UInt, Bool) => UInt val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0) val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width)) val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else { val valid = valids(width-1, 0) assert (valid === valids) val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0)) val filter = Cat(valid & ~mask, valid) val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width) val readys = ~((unready >> width) & unready(width-1, 0)) when (select && valid.orR) { mask := leftOR(readys & valid, width) } readys(width-1, 0) } def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = { apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*) } def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = { if (sources.isEmpty) { sink.bits := DontCare } else if (sources.size == 1) { sink :<>= sources.head._2 } else { val pairs = sources.toList val beatsIn = pairs.map(_._1) val sourcesIn = pairs.map(_._2) // The number of beats which remain to be sent val beatsLeft = RegInit(0.U) val idle = beatsLeft === 0.U val latch = idle && sink.ready // winner (if any) claims sink // Who wants access to the sink? val valids = sourcesIn.map(_.valid) // Arbitrate amongst the requests val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools) // Which request wins arbitration? 
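      // winner AND's the policy's grant (readys) with each source's valid request; the
      // prefix-OR scan just below shows it is one-hot: prefixOR(i) is high once any
      // lower-indexed source has already won, so asserting !prefixOR(i) || !winner(i)
      // forbids a second winner in the same cycle.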
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v }) // Confirm the policy works properly require (readys.size == valids.size) // Never two winners val prefixOR = winner.scanLeft(false.B)(_||_).init assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _}) // If there was any request, there is a winner assert (!valids.reduce(_||_) || winner.reduce(_||_)) // Track remaining beats val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) } val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire) // The one-hot source granted access in the previous cycle val state = RegInit(VecInit(Seq.fill(sources.size)(false.B))) val muxState = Mux(idle, winner, state) state := muxState val allowed = Mux(idle, readys, state) (sourcesIn zip allowed) foreach { case (s, r) => s.ready := sink.ready && r } sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids)) sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits)) } } } // Synthesizable unit tests import freechips.rocketchip.unittest._ abstract class DecoupledArbiterTest( policy: TLArbiter.Policy, txns: Int, timeout: Int, val numSources: Int, beatsLeftFromIdx: Int => UInt) (implicit p: Parameters) extends UnitTest(timeout) { val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W)))) dontTouch(sources.suggestName("sources")) val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W))) dontTouch(sink.suggestName("sink")) val count = RegInit(0.U(log2Ceil(txns).W)) val lfsr = LFSR(16, true.B) sources.zipWithIndex.map { case (z, i) => z.bits := i.U } TLArbiter(policy)(sink, sources.zipWithIndex.map { case (z, i) => (beatsLeftFromIdx(i), z) }:_*) count := count + 1.U io.finished := count >= txns.U } /** This tests that when a specific pattern of source valids are driven, * a new index from amongst that pattern is always selected, * unless one of those sources takes multiple beats, * in which case the same index should be selected until the arbiter goes idle. */ class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false) (implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U) { val lastWinner = RegInit((numSources+1).U) val beatsLeft = RegInit(0.U(log2Ceil(numSources).W)) val first = lastWinner > numSources.U val valid = lfsr(0) val ready = lfsr(15) sink.ready := ready sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid) } when (sink.fire) { if (print) { printf("TestRobin: %d\n", sink.bits) } when (beatsLeft === 0.U) { assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.") lastWinner := sink.bits beatsLeft := sink.bits } .otherwise { assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats") beatsLeft := beatsLeft - 1.U } } if (print) { when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) } } } /** This tests that the lowest index is always selected across random single cycle transactions. 
*/ class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertLowest(id: Int): Unit = { when (sources(id).valid) { assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) } } /** This tests that the highest index is always selected across random single cycle transactions. */ class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertHighest(id: Int): Unit = { when (sources(id).valid) { assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) } } File AMOALU.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters class StoreGen(typ: UInt, addr: UInt, dat: UInt, maxSize: Int) { val size = Wire(UInt(log2Up(log2Up(maxSize)+1).W)) size := typ val dat_padded = dat.pad(maxSize*8) def misaligned: Bool = (addr & ((1.U << size) - 1.U)(log2Up(maxSize)-1,0)).orR def mask = { var res = 1.U for (i <- 0 until log2Up(maxSize)) { val upper = Mux(addr(i), res, 0.U) | Mux(size >= (i+1).U, ((BigInt(1) << (1 << i))-1).U, 0.U) val lower = Mux(addr(i), 0.U, res) res = Cat(upper, lower) } res } protected def genData(i: Int): UInt = if (i >= log2Up(maxSize)) dat_padded else Mux(size === i.U, Fill(1 << (log2Up(maxSize)-i), dat_padded((8 << i)-1,0)), genData(i+1)) def data = genData(0) def wordData = genData(2) } class LoadGen(typ: UInt, signed: Bool, addr: UInt, dat: UInt, zero: Bool, maxSize: Int) { private val size = new StoreGen(typ, addr, dat, maxSize).size private def genData(logMinSize: Int): UInt = { var res = dat for (i <- log2Up(maxSize)-1 to logMinSize by -1) { val pos = 8 << i val shifted = Mux(addr(i), res(2*pos-1,pos), res(pos-1,0)) val doZero = (i == 0).B && zero val zeroed = Mux(doZero, 0.U, shifted) res = Cat(Mux(size === i.U || doZero, Fill(8*maxSize-pos, signed && zeroed(pos-1)), res(8*maxSize-1,pos)), zeroed) } res } def wordData = genData(2) def data = genData(0) } class AMOALU(operandBits: Int)(implicit p: Parameters) extends Module { val minXLen = 32 val widths = (0 to log2Ceil(operandBits / minXLen)).map(minXLen << _) val io = IO(new Bundle { val mask = Input(UInt((operandBits / 8).W)) val cmd = Input(UInt(M_SZ.W)) val lhs = Input(UInt(operandBits.W)) val rhs = Input(UInt(operandBits.W)) val out = Output(UInt(operandBits.W)) val out_unmasked = Output(UInt(operandBits.W)) }) val max = io.cmd === M_XA_MAX || io.cmd === M_XA_MAXU val min = io.cmd === M_XA_MIN || io.cmd === M_XA_MINU val add = io.cmd === M_XA_ADD val logic_and = io.cmd === M_XA_OR || io.cmd === M_XA_AND val logic_xor = io.cmd === M_XA_XOR || io.cmd === M_XA_OR val adder_out = { // partition the carry chain to support sub-xLen addition val mask = ~(0.U(operandBits.W) +: widths.init.map(w => !io.mask(w/8-1) << (w-1))).reduce(_|_) (io.lhs 
& mask) + (io.rhs & mask) } val less = { // break up the comparator so the lower parts will be CSE'd def isLessUnsigned(x: UInt, y: UInt, n: Int): Bool = { if (n == minXLen) x(n-1, 0) < y(n-1, 0) else x(n-1, n/2) < y(n-1, n/2) || x(n-1, n/2) === y(n-1, n/2) && isLessUnsigned(x, y, n/2) } def isLess(x: UInt, y: UInt, n: Int): Bool = { val signed = { val mask = M_XA_MIN ^ M_XA_MINU (io.cmd & mask) === (M_XA_MIN & mask) } Mux(x(n-1) === y(n-1), isLessUnsigned(x, y, n), Mux(signed, x(n-1), y(n-1))) } PriorityMux(widths.reverse.map(w => (io.mask(w/8/2), isLess(io.lhs, io.rhs, w)))) } val minmax = Mux(Mux(less, min, max), io.lhs, io.rhs) val logic = Mux(logic_and, io.lhs & io.rhs, 0.U) | Mux(logic_xor, io.lhs ^ io.rhs, 0.U) val out = Mux(add, adder_out, Mux(logic_and || logic_xor, logic, minmax)) val wmask = FillInterleaved(8, io.mask) io.out := wmask & out | ~wmask & io.lhs io.out_unmasked := out }
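The AMOALU above merges its result back into the old word one byte at a time: io.out := wmask & out | ~wmask & io.lhs, where wmask is io.mask expanded to one bit per data bit. The following plain-Scala sketch models just that byte-masked merge; the object and method names are illustrative and not part of the sources above.

object AmoaluMaskSketch {
  // Expand a per-byte mask into a per-bit mask (8 data bits per mask bit),
  // mirroring FillInterleaved(8, io.mask).
  def fillInterleaved8(mask: Long, bytes: Int): BigInt =
    (0 until bytes).foldLeft(BigInt(0)) { (acc, i) =>
      if (((mask >> i) & 1L) == 1L) acc | (BigInt(0xFF) << (8 * i)) else acc
    }

  // out bytes selected by byteMask take the AMO result; all other bytes keep lhs.
  def merge(lhs: BigInt, out: BigInt, byteMask: Long, bytes: Int): BigInt = {
    val wmask   = fillInterleaved8(byteMask, bytes)
    val allOnes = (BigInt(1) << (8 * bytes)) - 1
    (wmask & out) | (~wmask & allOnes & lhs)
  }

  def main(args: Array[String]): Unit = {
    // byteMask 0x0F: the low four bytes take the AMO result, the upper four keep lhs.
    val r = merge(lhs = BigInt("1111111111111111", 16),
                  out = BigInt("22222222ffffffff", 16),
                  byteMask = 0x0F, bytes = 8)
    println(r.toString(16)) // prints 11111111ffffffff
  }
}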
module BoomNonBlockingDCache( // @[dcache.scala:413:7] input clock, // @[dcache.scala:413:7] input reset, // @[dcache.scala:413:7] input auto_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_out_b_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_b_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_b_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_out_b_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_b_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_out_b_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_out_b_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_b_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_b_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_out_c_ready, // @[LazyModuleImp.scala:107:25] output auto_out_c_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_c_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_c_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_out_c_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_c_bits_address, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_c_bits_data, // @[LazyModuleImp.scala:107:25] output auto_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_out_e_ready, // @[LazyModuleImp.scala:107:25] output auto_out_e_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_e_bits_sink, // @[LazyModuleImp.scala:107:25] output io_lsu_req_ready, // @[dcache.scala:419:14] input io_lsu_req_valid, // @[dcache.scala:419:14] input io_lsu_req_bits_0_valid, // @[dcache.scala:419:14] input [6:0] io_lsu_req_bits_0_bits_uop_uopc, // @[dcache.scala:419:14] input [31:0] io_lsu_req_bits_0_bits_uop_inst, // @[dcache.scala:419:14] input [31:0] io_lsu_req_bits_0_bits_uop_debug_inst, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_rvc, // @[dcache.scala:419:14] input [39:0] io_lsu_req_bits_0_bits_uop_debug_pc, // @[dcache.scala:419:14] input [2:0] io_lsu_req_bits_0_bits_uop_iq_type, // @[dcache.scala:419:14] input [9:0] io_lsu_req_bits_0_bits_uop_fu_code, // @[dcache.scala:419:14] input [3:0] io_lsu_req_bits_0_bits_uop_ctrl_br_type, // @[dcache.scala:419:14] input [1:0] 
io_lsu_req_bits_0_bits_uop_ctrl_op1_sel, // @[dcache.scala:419:14] input [2:0] io_lsu_req_bits_0_bits_uop_ctrl_op2_sel, // @[dcache.scala:419:14] input [2:0] io_lsu_req_bits_0_bits_uop_ctrl_imm_sel, // @[dcache.scala:419:14] input [4:0] io_lsu_req_bits_0_bits_uop_ctrl_op_fcn, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_ctrl_fcn_dw, // @[dcache.scala:419:14] input [2:0] io_lsu_req_bits_0_bits_uop_ctrl_csr_cmd, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_ctrl_is_load, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_ctrl_is_sta, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_ctrl_is_std, // @[dcache.scala:419:14] input [1:0] io_lsu_req_bits_0_bits_uop_iw_state, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_iw_p1_poisoned, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_iw_p2_poisoned, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_br, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_jalr, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_jal, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_sfb, // @[dcache.scala:419:14] input [7:0] io_lsu_req_bits_0_bits_uop_br_mask, // @[dcache.scala:419:14] input [2:0] io_lsu_req_bits_0_bits_uop_br_tag, // @[dcache.scala:419:14] input [3:0] io_lsu_req_bits_0_bits_uop_ftq_idx, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_edge_inst, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_pc_lob, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_taken, // @[dcache.scala:419:14] input [19:0] io_lsu_req_bits_0_bits_uop_imm_packed, // @[dcache.scala:419:14] input [11:0] io_lsu_req_bits_0_bits_uop_csr_addr, // @[dcache.scala:419:14] input [4:0] io_lsu_req_bits_0_bits_uop_rob_idx, // @[dcache.scala:419:14] input [2:0] io_lsu_req_bits_0_bits_uop_ldq_idx, // @[dcache.scala:419:14] input [2:0] io_lsu_req_bits_0_bits_uop_stq_idx, // @[dcache.scala:419:14] input [1:0] io_lsu_req_bits_0_bits_uop_rxq_idx, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_pdst, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_prs1, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_prs2, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_prs3, // @[dcache.scala:419:14] input [3:0] io_lsu_req_bits_0_bits_uop_ppred, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_prs1_busy, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_prs2_busy, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_prs3_busy, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_ppred_busy, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_stale_pdst, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_exception, // @[dcache.scala:419:14] input [63:0] io_lsu_req_bits_0_bits_uop_exc_cause, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_bypassable, // @[dcache.scala:419:14] input [4:0] io_lsu_req_bits_0_bits_uop_mem_cmd, // @[dcache.scala:419:14] input [1:0] io_lsu_req_bits_0_bits_uop_mem_size, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_mem_signed, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_fence, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_fencei, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_amo, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_uses_ldq, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_uses_stq, // @[dcache.scala:419:14] input 
io_lsu_req_bits_0_bits_uop_is_sys_pc2epc, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_is_unique, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_flush_on_commit, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_ldst_is_rs1, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_ldst, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_lrs1, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_lrs2, // @[dcache.scala:419:14] input [5:0] io_lsu_req_bits_0_bits_uop_lrs3, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_ldst_val, // @[dcache.scala:419:14] input [1:0] io_lsu_req_bits_0_bits_uop_dst_rtype, // @[dcache.scala:419:14] input [1:0] io_lsu_req_bits_0_bits_uop_lrs1_rtype, // @[dcache.scala:419:14] input [1:0] io_lsu_req_bits_0_bits_uop_lrs2_rtype, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_frs3_en, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_fp_val, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_fp_single, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_xcpt_pf_if, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_xcpt_ae_if, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_xcpt_ma_if, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_bp_debug_if, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_uop_bp_xcpt_if, // @[dcache.scala:419:14] input [1:0] io_lsu_req_bits_0_bits_uop_debug_fsrc, // @[dcache.scala:419:14] input [1:0] io_lsu_req_bits_0_bits_uop_debug_tsrc, // @[dcache.scala:419:14] input [39:0] io_lsu_req_bits_0_bits_addr, // @[dcache.scala:419:14] input [63:0] io_lsu_req_bits_0_bits_data, // @[dcache.scala:419:14] input io_lsu_req_bits_0_bits_is_hella, // @[dcache.scala:419:14] input io_lsu_s1_kill_0, // @[dcache.scala:419:14] output io_lsu_resp_0_valid, // @[dcache.scala:419:14] output [6:0] io_lsu_resp_0_bits_uop_uopc, // @[dcache.scala:419:14] output [31:0] io_lsu_resp_0_bits_uop_inst, // @[dcache.scala:419:14] output [31:0] io_lsu_resp_0_bits_uop_debug_inst, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_rvc, // @[dcache.scala:419:14] output [39:0] io_lsu_resp_0_bits_uop_debug_pc, // @[dcache.scala:419:14] output [2:0] io_lsu_resp_0_bits_uop_iq_type, // @[dcache.scala:419:14] output [9:0] io_lsu_resp_0_bits_uop_fu_code, // @[dcache.scala:419:14] output [3:0] io_lsu_resp_0_bits_uop_ctrl_br_type, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_ctrl_op1_sel, // @[dcache.scala:419:14] output [2:0] io_lsu_resp_0_bits_uop_ctrl_op2_sel, // @[dcache.scala:419:14] output [2:0] io_lsu_resp_0_bits_uop_ctrl_imm_sel, // @[dcache.scala:419:14] output [4:0] io_lsu_resp_0_bits_uop_ctrl_op_fcn, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_ctrl_fcn_dw, // @[dcache.scala:419:14] output [2:0] io_lsu_resp_0_bits_uop_ctrl_csr_cmd, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_ctrl_is_load, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_ctrl_is_sta, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_ctrl_is_std, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_iw_state, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_iw_p1_poisoned, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_iw_p2_poisoned, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_br, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_jalr, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_jal, // @[dcache.scala:419:14] output 
io_lsu_resp_0_bits_uop_is_sfb, // @[dcache.scala:419:14] output [7:0] io_lsu_resp_0_bits_uop_br_mask, // @[dcache.scala:419:14] output [2:0] io_lsu_resp_0_bits_uop_br_tag, // @[dcache.scala:419:14] output [3:0] io_lsu_resp_0_bits_uop_ftq_idx, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_edge_inst, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_pc_lob, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_taken, // @[dcache.scala:419:14] output [19:0] io_lsu_resp_0_bits_uop_imm_packed, // @[dcache.scala:419:14] output [11:0] io_lsu_resp_0_bits_uop_csr_addr, // @[dcache.scala:419:14] output [4:0] io_lsu_resp_0_bits_uop_rob_idx, // @[dcache.scala:419:14] output [2:0] io_lsu_resp_0_bits_uop_ldq_idx, // @[dcache.scala:419:14] output [2:0] io_lsu_resp_0_bits_uop_stq_idx, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_rxq_idx, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_pdst, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_prs1, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_prs2, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_prs3, // @[dcache.scala:419:14] output [3:0] io_lsu_resp_0_bits_uop_ppred, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_prs1_busy, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_prs2_busy, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_prs3_busy, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_ppred_busy, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_stale_pdst, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_exception, // @[dcache.scala:419:14] output [63:0] io_lsu_resp_0_bits_uop_exc_cause, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_bypassable, // @[dcache.scala:419:14] output [4:0] io_lsu_resp_0_bits_uop_mem_cmd, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_mem_size, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_mem_signed, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_fence, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_fencei, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_amo, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_uses_ldq, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_uses_stq, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_sys_pc2epc, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_is_unique, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_flush_on_commit, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_ldst_is_rs1, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_ldst, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_lrs1, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_lrs2, // @[dcache.scala:419:14] output [5:0] io_lsu_resp_0_bits_uop_lrs3, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_ldst_val, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_dst_rtype, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_lrs1_rtype, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_lrs2_rtype, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_frs3_en, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_fp_val, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_fp_single, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_xcpt_pf_if, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_xcpt_ae_if, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_xcpt_ma_if, // 
@[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_bp_debug_if, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_uop_bp_xcpt_if, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_debug_fsrc, // @[dcache.scala:419:14] output [1:0] io_lsu_resp_0_bits_uop_debug_tsrc, // @[dcache.scala:419:14] output [63:0] io_lsu_resp_0_bits_data, // @[dcache.scala:419:14] output io_lsu_resp_0_bits_is_hella, // @[dcache.scala:419:14] output io_lsu_nack_0_valid, // @[dcache.scala:419:14] output [6:0] io_lsu_nack_0_bits_uop_uopc, // @[dcache.scala:419:14] output [31:0] io_lsu_nack_0_bits_uop_inst, // @[dcache.scala:419:14] output [31:0] io_lsu_nack_0_bits_uop_debug_inst, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_rvc, // @[dcache.scala:419:14] output [39:0] io_lsu_nack_0_bits_uop_debug_pc, // @[dcache.scala:419:14] output [2:0] io_lsu_nack_0_bits_uop_iq_type, // @[dcache.scala:419:14] output [9:0] io_lsu_nack_0_bits_uop_fu_code, // @[dcache.scala:419:14] output [3:0] io_lsu_nack_0_bits_uop_ctrl_br_type, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_ctrl_op1_sel, // @[dcache.scala:419:14] output [2:0] io_lsu_nack_0_bits_uop_ctrl_op2_sel, // @[dcache.scala:419:14] output [2:0] io_lsu_nack_0_bits_uop_ctrl_imm_sel, // @[dcache.scala:419:14] output [4:0] io_lsu_nack_0_bits_uop_ctrl_op_fcn, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_ctrl_fcn_dw, // @[dcache.scala:419:14] output [2:0] io_lsu_nack_0_bits_uop_ctrl_csr_cmd, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_ctrl_is_load, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_ctrl_is_sta, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_ctrl_is_std, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_iw_state, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_iw_p1_poisoned, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_iw_p2_poisoned, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_br, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_jalr, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_jal, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_sfb, // @[dcache.scala:419:14] output [7:0] io_lsu_nack_0_bits_uop_br_mask, // @[dcache.scala:419:14] output [2:0] io_lsu_nack_0_bits_uop_br_tag, // @[dcache.scala:419:14] output [3:0] io_lsu_nack_0_bits_uop_ftq_idx, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_edge_inst, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_pc_lob, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_taken, // @[dcache.scala:419:14] output [19:0] io_lsu_nack_0_bits_uop_imm_packed, // @[dcache.scala:419:14] output [11:0] io_lsu_nack_0_bits_uop_csr_addr, // @[dcache.scala:419:14] output [4:0] io_lsu_nack_0_bits_uop_rob_idx, // @[dcache.scala:419:14] output [2:0] io_lsu_nack_0_bits_uop_ldq_idx, // @[dcache.scala:419:14] output [2:0] io_lsu_nack_0_bits_uop_stq_idx, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_rxq_idx, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_pdst, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_prs1, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_prs2, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_prs3, // @[dcache.scala:419:14] output [3:0] io_lsu_nack_0_bits_uop_ppred, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_prs1_busy, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_prs2_busy, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_prs3_busy, // 
@[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_ppred_busy, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_stale_pdst, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_exception, // @[dcache.scala:419:14] output [63:0] io_lsu_nack_0_bits_uop_exc_cause, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_bypassable, // @[dcache.scala:419:14] output [4:0] io_lsu_nack_0_bits_uop_mem_cmd, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_mem_size, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_mem_signed, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_fence, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_fencei, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_amo, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_uses_ldq, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_uses_stq, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_sys_pc2epc, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_is_unique, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_flush_on_commit, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_ldst_is_rs1, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_ldst, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_lrs1, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_lrs2, // @[dcache.scala:419:14] output [5:0] io_lsu_nack_0_bits_uop_lrs3, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_ldst_val, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_dst_rtype, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_lrs1_rtype, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_lrs2_rtype, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_frs3_en, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_fp_val, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_fp_single, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_xcpt_pf_if, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_xcpt_ae_if, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_xcpt_ma_if, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_bp_debug_if, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_uop_bp_xcpt_if, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_debug_fsrc, // @[dcache.scala:419:14] output [1:0] io_lsu_nack_0_bits_uop_debug_tsrc, // @[dcache.scala:419:14] output [39:0] io_lsu_nack_0_bits_addr, // @[dcache.scala:419:14] output [63:0] io_lsu_nack_0_bits_data, // @[dcache.scala:419:14] output io_lsu_nack_0_bits_is_hella, // @[dcache.scala:419:14] input [7:0] io_lsu_brupdate_b1_resolve_mask, // @[dcache.scala:419:14] input [7:0] io_lsu_brupdate_b1_mispredict_mask, // @[dcache.scala:419:14] input [6:0] io_lsu_brupdate_b2_uop_uopc, // @[dcache.scala:419:14] input [31:0] io_lsu_brupdate_b2_uop_inst, // @[dcache.scala:419:14] input [31:0] io_lsu_brupdate_b2_uop_debug_inst, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_rvc, // @[dcache.scala:419:14] input [39:0] io_lsu_brupdate_b2_uop_debug_pc, // @[dcache.scala:419:14] input [2:0] io_lsu_brupdate_b2_uop_iq_type, // @[dcache.scala:419:14] input [9:0] io_lsu_brupdate_b2_uop_fu_code, // @[dcache.scala:419:14] input [3:0] io_lsu_brupdate_b2_uop_ctrl_br_type, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_ctrl_op1_sel, // @[dcache.scala:419:14] input [2:0] io_lsu_brupdate_b2_uop_ctrl_op2_sel, // @[dcache.scala:419:14] input [2:0] io_lsu_brupdate_b2_uop_ctrl_imm_sel, // 
@[dcache.scala:419:14] input [4:0] io_lsu_brupdate_b2_uop_ctrl_op_fcn, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_ctrl_fcn_dw, // @[dcache.scala:419:14] input [2:0] io_lsu_brupdate_b2_uop_ctrl_csr_cmd, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_ctrl_is_load, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_ctrl_is_sta, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_ctrl_is_std, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_iw_state, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_iw_p1_poisoned, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_iw_p2_poisoned, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_br, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_jalr, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_jal, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_sfb, // @[dcache.scala:419:14] input [7:0] io_lsu_brupdate_b2_uop_br_mask, // @[dcache.scala:419:14] input [2:0] io_lsu_brupdate_b2_uop_br_tag, // @[dcache.scala:419:14] input [3:0] io_lsu_brupdate_b2_uop_ftq_idx, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_edge_inst, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_pc_lob, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_taken, // @[dcache.scala:419:14] input [19:0] io_lsu_brupdate_b2_uop_imm_packed, // @[dcache.scala:419:14] input [11:0] io_lsu_brupdate_b2_uop_csr_addr, // @[dcache.scala:419:14] input [4:0] io_lsu_brupdate_b2_uop_rob_idx, // @[dcache.scala:419:14] input [2:0] io_lsu_brupdate_b2_uop_ldq_idx, // @[dcache.scala:419:14] input [2:0] io_lsu_brupdate_b2_uop_stq_idx, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_rxq_idx, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_pdst, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_prs1, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_prs2, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_prs3, // @[dcache.scala:419:14] input [3:0] io_lsu_brupdate_b2_uop_ppred, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_prs1_busy, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_prs2_busy, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_prs3_busy, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_ppred_busy, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_stale_pdst, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_exception, // @[dcache.scala:419:14] input [63:0] io_lsu_brupdate_b2_uop_exc_cause, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_bypassable, // @[dcache.scala:419:14] input [4:0] io_lsu_brupdate_b2_uop_mem_cmd, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_mem_size, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_mem_signed, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_fence, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_fencei, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_amo, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_uses_ldq, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_uses_stq, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_sys_pc2epc, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_is_unique, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_flush_on_commit, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_ldst_is_rs1, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_ldst, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_lrs1, // 
@[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_lrs2, // @[dcache.scala:419:14] input [5:0] io_lsu_brupdate_b2_uop_lrs3, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_ldst_val, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_dst_rtype, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_lrs1_rtype, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_lrs2_rtype, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_frs3_en, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_fp_val, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_fp_single, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_xcpt_pf_if, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_xcpt_ae_if, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_xcpt_ma_if, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_bp_debug_if, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_uop_bp_xcpt_if, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_debug_fsrc, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_uop_debug_tsrc, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_valid, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_mispredict, // @[dcache.scala:419:14] input io_lsu_brupdate_b2_taken, // @[dcache.scala:419:14] input [2:0] io_lsu_brupdate_b2_cfi_type, // @[dcache.scala:419:14] input [1:0] io_lsu_brupdate_b2_pc_sel, // @[dcache.scala:419:14] input [39:0] io_lsu_brupdate_b2_jalr_target, // @[dcache.scala:419:14] input [20:0] io_lsu_brupdate_b2_target_offset, // @[dcache.scala:419:14] input io_lsu_exception, // @[dcache.scala:419:14] input [4:0] io_lsu_rob_pnr_idx, // @[dcache.scala:419:14] input [4:0] io_lsu_rob_head_idx, // @[dcache.scala:419:14] input io_lsu_release_ready, // @[dcache.scala:419:14] output io_lsu_release_valid, // @[dcache.scala:419:14] output [2:0] io_lsu_release_bits_opcode, // @[dcache.scala:419:14] output [2:0] io_lsu_release_bits_param, // @[dcache.scala:419:14] output [3:0] io_lsu_release_bits_size, // @[dcache.scala:419:14] output [1:0] io_lsu_release_bits_source, // @[dcache.scala:419:14] output [31:0] io_lsu_release_bits_address, // @[dcache.scala:419:14] output [63:0] io_lsu_release_bits_data, // @[dcache.scala:419:14] input io_lsu_force_order, // @[dcache.scala:419:14] output io_lsu_ordered, // @[dcache.scala:419:14] output io_lsu_perf_acquire, // @[dcache.scala:419:14] output io_lsu_perf_release // @[dcache.scala:419:14] ); wire resp_0_bits_is_hella; // @[dcache.scala:846:22] wire [63:0] resp_0_bits_data; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_debug_tsrc; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_debug_fsrc; // @[dcache.scala:846:22] wire resp_0_bits_uop_bp_xcpt_if; // @[dcache.scala:846:22] wire resp_0_bits_uop_bp_debug_if; // @[dcache.scala:846:22] wire resp_0_bits_uop_xcpt_ma_if; // @[dcache.scala:846:22] wire resp_0_bits_uop_xcpt_ae_if; // @[dcache.scala:846:22] wire resp_0_bits_uop_xcpt_pf_if; // @[dcache.scala:846:22] wire resp_0_bits_uop_fp_single; // @[dcache.scala:846:22] wire resp_0_bits_uop_fp_val; // @[dcache.scala:846:22] wire resp_0_bits_uop_frs3_en; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_lrs2_rtype; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_lrs1_rtype; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_dst_rtype; // @[dcache.scala:846:22] wire resp_0_bits_uop_ldst_val; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_lrs3; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_lrs2; // @[dcache.scala:846:22] wire [5:0] 
resp_0_bits_uop_lrs1; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_ldst; // @[dcache.scala:846:22] wire resp_0_bits_uop_ldst_is_rs1; // @[dcache.scala:846:22] wire resp_0_bits_uop_flush_on_commit; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_unique; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_sys_pc2epc; // @[dcache.scala:846:22] wire resp_0_bits_uop_uses_stq; // @[dcache.scala:846:22] wire resp_0_bits_uop_uses_ldq; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_amo; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_fencei; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_fence; // @[dcache.scala:846:22] wire resp_0_bits_uop_mem_signed; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_mem_size; // @[dcache.scala:846:22] wire [4:0] resp_0_bits_uop_mem_cmd; // @[dcache.scala:846:22] wire resp_0_bits_uop_bypassable; // @[dcache.scala:846:22] wire [63:0] resp_0_bits_uop_exc_cause; // @[dcache.scala:846:22] wire resp_0_bits_uop_exception; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_stale_pdst; // @[dcache.scala:846:22] wire resp_0_bits_uop_ppred_busy; // @[dcache.scala:846:22] wire resp_0_bits_uop_prs3_busy; // @[dcache.scala:846:22] wire resp_0_bits_uop_prs2_busy; // @[dcache.scala:846:22] wire resp_0_bits_uop_prs1_busy; // @[dcache.scala:846:22] wire [3:0] resp_0_bits_uop_ppred; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_prs3; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_prs2; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_prs1; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_pdst; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_rxq_idx; // @[dcache.scala:846:22] wire [2:0] resp_0_bits_uop_stq_idx; // @[dcache.scala:846:22] wire [2:0] resp_0_bits_uop_ldq_idx; // @[dcache.scala:846:22] wire [4:0] resp_0_bits_uop_rob_idx; // @[dcache.scala:846:22] wire [11:0] resp_0_bits_uop_csr_addr; // @[dcache.scala:846:22] wire [19:0] resp_0_bits_uop_imm_packed; // @[dcache.scala:846:22] wire resp_0_bits_uop_taken; // @[dcache.scala:846:22] wire [5:0] resp_0_bits_uop_pc_lob; // @[dcache.scala:846:22] wire resp_0_bits_uop_edge_inst; // @[dcache.scala:846:22] wire [3:0] resp_0_bits_uop_ftq_idx; // @[dcache.scala:846:22] wire [2:0] resp_0_bits_uop_br_tag; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_sfb; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_jal; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_jalr; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_br; // @[dcache.scala:846:22] wire resp_0_bits_uop_iw_p2_poisoned; // @[dcache.scala:846:22] wire resp_0_bits_uop_iw_p1_poisoned; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_iw_state; // @[dcache.scala:846:22] wire [9:0] resp_0_bits_uop_fu_code; // @[dcache.scala:846:22] wire [2:0] resp_0_bits_uop_iq_type; // @[dcache.scala:846:22] wire [39:0] resp_0_bits_uop_debug_pc; // @[dcache.scala:846:22] wire resp_0_bits_uop_is_rvc; // @[dcache.scala:846:22] wire [31:0] resp_0_bits_uop_debug_inst; // @[dcache.scala:846:22] wire [31:0] resp_0_bits_uop_inst; // @[dcache.scala:846:22] wire [6:0] resp_0_bits_uop_uopc; // @[dcache.scala:846:22] wire resp_0_bits_uop_ctrl_is_std; // @[dcache.scala:846:22] wire resp_0_bits_uop_ctrl_is_sta; // @[dcache.scala:846:22] wire resp_0_bits_uop_ctrl_is_load; // @[dcache.scala:846:22] wire [2:0] resp_0_bits_uop_ctrl_csr_cmd; // @[dcache.scala:846:22] wire resp_0_bits_uop_ctrl_fcn_dw; // @[dcache.scala:846:22] wire [4:0] resp_0_bits_uop_ctrl_op_fcn; // @[dcache.scala:846:22] wire [2:0] resp_0_bits_uop_ctrl_imm_sel; // @[dcache.scala:846:22] wire 
[2:0] resp_0_bits_uop_ctrl_op2_sel; // @[dcache.scala:846:22] wire [1:0] resp_0_bits_uop_ctrl_op1_sel; // @[dcache.scala:846:22] wire [3:0] resp_0_bits_uop_ctrl_br_type; // @[dcache.scala:846:22] wire [1:0] _s2_repl_meta_WIRE_1_coh_state; // @[Mux.scala:30:73] wire [19:0] _s2_repl_meta_WIRE_1_tag; // @[Mux.scala:30:73] wire [1:0] _s2_hit_state_WIRE_1_state; // @[Mux.scala:30:73] wire [63:0] _amoalu_io_out; // @[dcache.scala:896:24] wire _lsu_release_arb_io_in_0_ready; // @[dcache.scala:814:31] wire _lsu_release_arb_io_in_1_ready; // @[dcache.scala:814:31] wire _wbArb_io_in_0_ready; // @[dcache.scala:805:21] wire _wbArb_io_in_1_ready; // @[dcache.scala:805:21] wire _wbArb_io_out_valid; // @[dcache.scala:805:21] wire [19:0] _wbArb_io_out_bits_tag; // @[dcache.scala:805:21] wire [5:0] _wbArb_io_out_bits_idx; // @[dcache.scala:805:21] wire [1:0] _wbArb_io_out_bits_source; // @[dcache.scala:805:21] wire [2:0] _wbArb_io_out_bits_param; // @[dcache.scala:805:21] wire [3:0] _wbArb_io_out_bits_way_en; // @[dcache.scala:805:21] wire _wbArb_io_out_bits_voluntary; // @[dcache.scala:805:21] wire _lfsr_prng_io_out_0; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_1; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_2; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_3; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_4; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_5; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_6; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_7; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_8; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_9; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_10; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_11; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_12; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_13; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_14; // @[PRNG.scala:91:22] wire _lfsr_prng_io_out_15; // @[PRNG.scala:91:22] wire _dataReadArb_io_in_1_ready; // @[dcache.scala:463:27] wire _dataReadArb_io_in_2_ready; // @[dcache.scala:463:27] wire _dataReadArb_io_out_valid; // @[dcache.scala:463:27] wire [3:0] _dataReadArb_io_out_bits_req_0_way_en; // @[dcache.scala:463:27] wire [11:0] _dataReadArb_io_out_bits_req_0_addr; // @[dcache.scala:463:27] wire _dataReadArb_io_out_bits_valid_0; // @[dcache.scala:463:27] wire _dataWriteArb_io_in_1_ready; // @[dcache.scala:461:28] wire [3:0] _dataWriteArb_io_out_bits_way_en; // @[dcache.scala:461:28] wire [11:0] _dataWriteArb_io_out_bits_addr; // @[dcache.scala:461:28] wire [63:0] _dataWriteArb_io_out_bits_data; // @[dcache.scala:461:28] wire _metaReadArb_io_in_1_ready; // @[dcache.scala:445:27] wire _metaReadArb_io_in_2_ready; // @[dcache.scala:445:27] wire _metaReadArb_io_in_3_ready; // @[dcache.scala:445:27] wire _metaReadArb_io_in_4_ready; // @[dcache.scala:445:27] wire _metaReadArb_io_in_5_ready; // @[dcache.scala:445:27] wire _metaReadArb_io_out_valid; // @[dcache.scala:445:27] wire [5:0] _metaReadArb_io_out_bits_req_0_idx; // @[dcache.scala:445:27] wire [3:0] _metaReadArb_io_out_bits_req_0_way_en; // @[dcache.scala:445:27] wire [19:0] _metaReadArb_io_out_bits_req_0_tag; // @[dcache.scala:445:27] wire _metaWriteArb_io_in_0_ready; // @[dcache.scala:443:28] wire _metaWriteArb_io_in_1_ready; // @[dcache.scala:443:28] wire _metaWriteArb_io_out_valid; // @[dcache.scala:443:28] wire [5:0] _metaWriteArb_io_out_bits_idx; // @[dcache.scala:443:28] wire [3:0] _metaWriteArb_io_out_bits_way_en; // @[dcache.scala:443:28] wire [19:0] _metaWriteArb_io_out_bits_tag; // @[dcache.scala:443:28] wire [1:0] 
_metaWriteArb_io_out_bits_data_coh_state; // @[dcache.scala:443:28] wire [19:0] _metaWriteArb_io_out_bits_data_tag; // @[dcache.scala:443:28] wire _meta_0_io_read_ready; // @[dcache.scala:442:41] wire _meta_0_io_write_ready; // @[dcache.scala:442:41] wire [1:0] _meta_0_io_resp_0_coh_state; // @[dcache.scala:442:41] wire [19:0] _meta_0_io_resp_0_tag; // @[dcache.scala:442:41] wire [1:0] _meta_0_io_resp_1_coh_state; // @[dcache.scala:442:41] wire [19:0] _meta_0_io_resp_1_tag; // @[dcache.scala:442:41] wire [1:0] _meta_0_io_resp_2_coh_state; // @[dcache.scala:442:41] wire [19:0] _meta_0_io_resp_2_tag; // @[dcache.scala:442:41] wire [1:0] _meta_0_io_resp_3_coh_state; // @[dcache.scala:442:41] wire [19:0] _meta_0_io_resp_3_tag; // @[dcache.scala:442:41] wire _mshrs_io_req_0_ready; // @[dcache.scala:433:21] wire _mshrs_io_secondary_miss_0; // @[dcache.scala:433:21] wire _mshrs_io_block_hit_0; // @[dcache.scala:433:21] wire _mshrs_io_mem_grant_ready; // @[dcache.scala:433:21] wire _mshrs_io_refill_valid; // @[dcache.scala:433:21] wire [3:0] _mshrs_io_refill_bits_way_en; // @[dcache.scala:433:21] wire [11:0] _mshrs_io_refill_bits_addr; // @[dcache.scala:433:21] wire [63:0] _mshrs_io_refill_bits_data; // @[dcache.scala:433:21] wire _mshrs_io_meta_write_valid; // @[dcache.scala:433:21] wire [5:0] _mshrs_io_meta_write_bits_idx; // @[dcache.scala:433:21] wire [3:0] _mshrs_io_meta_write_bits_way_en; // @[dcache.scala:433:21] wire [1:0] _mshrs_io_meta_write_bits_data_coh_state; // @[dcache.scala:433:21] wire [19:0] _mshrs_io_meta_write_bits_data_tag; // @[dcache.scala:433:21] wire _mshrs_io_meta_read_valid; // @[dcache.scala:433:21] wire [5:0] _mshrs_io_meta_read_bits_idx; // @[dcache.scala:433:21] wire [3:0] _mshrs_io_meta_read_bits_way_en; // @[dcache.scala:433:21] wire [19:0] _mshrs_io_meta_read_bits_tag; // @[dcache.scala:433:21] wire _mshrs_io_replay_valid; // @[dcache.scala:433:21] wire [4:0] _mshrs_io_replay_bits_uop_mem_cmd; // @[dcache.scala:433:21] wire [39:0] _mshrs_io_replay_bits_addr; // @[dcache.scala:433:21] wire [3:0] _mshrs_io_replay_bits_way_en; // @[dcache.scala:433:21] wire _mshrs_io_wb_req_valid; // @[dcache.scala:433:21] wire [19:0] _mshrs_io_wb_req_bits_tag; // @[dcache.scala:433:21] wire [5:0] _mshrs_io_wb_req_bits_idx; // @[dcache.scala:433:21] wire [1:0] _mshrs_io_wb_req_bits_source; // @[dcache.scala:433:21] wire [2:0] _mshrs_io_wb_req_bits_param; // @[dcache.scala:433:21] wire [3:0] _mshrs_io_wb_req_bits_way_en; // @[dcache.scala:433:21] wire _mshrs_io_fence_rdy; // @[dcache.scala:433:21] wire _mshrs_io_probe_rdy; // @[dcache.scala:433:21] wire _prober_io_req_ready; // @[dcache.scala:432:22] wire _prober_io_rep_valid; // @[dcache.scala:432:22] wire [2:0] _prober_io_rep_bits_param; // @[dcache.scala:432:22] wire [3:0] _prober_io_rep_bits_size; // @[dcache.scala:432:22] wire [1:0] _prober_io_rep_bits_source; // @[dcache.scala:432:22] wire [31:0] _prober_io_rep_bits_address; // @[dcache.scala:432:22] wire _prober_io_meta_read_valid; // @[dcache.scala:432:22] wire [5:0] _prober_io_meta_read_bits_idx; // @[dcache.scala:432:22] wire [19:0] _prober_io_meta_read_bits_tag; // @[dcache.scala:432:22] wire _prober_io_meta_write_valid; // @[dcache.scala:432:22] wire [5:0] _prober_io_meta_write_bits_idx; // @[dcache.scala:432:22] wire [3:0] _prober_io_meta_write_bits_way_en; // @[dcache.scala:432:22] wire [19:0] _prober_io_meta_write_bits_tag; // @[dcache.scala:432:22] wire [1:0] _prober_io_meta_write_bits_data_coh_state; // @[dcache.scala:432:22] wire [19:0] 
_prober_io_meta_write_bits_data_tag; // @[dcache.scala:432:22] wire _prober_io_wb_req_valid; // @[dcache.scala:432:22] wire [19:0] _prober_io_wb_req_bits_tag; // @[dcache.scala:432:22] wire [5:0] _prober_io_wb_req_bits_idx; // @[dcache.scala:432:22] wire [1:0] _prober_io_wb_req_bits_source; // @[dcache.scala:432:22] wire [2:0] _prober_io_wb_req_bits_param; // @[dcache.scala:432:22] wire [3:0] _prober_io_wb_req_bits_way_en; // @[dcache.scala:432:22] wire _prober_io_mshr_wb_rdy; // @[dcache.scala:432:22] wire _prober_io_lsu_release_valid; // @[dcache.scala:432:22] wire [2:0] _prober_io_lsu_release_bits_param; // @[dcache.scala:432:22] wire [3:0] _prober_io_lsu_release_bits_size; // @[dcache.scala:432:22] wire [1:0] _prober_io_lsu_release_bits_source; // @[dcache.scala:432:22] wire [31:0] _prober_io_lsu_release_bits_address; // @[dcache.scala:432:22] wire _prober_io_state_valid; // @[dcache.scala:432:22] wire [39:0] _prober_io_state_bits; // @[dcache.scala:432:22] wire _wb_io_req_ready; // @[dcache.scala:431:18] wire _wb_io_meta_read_valid; // @[dcache.scala:431:18] wire [5:0] _wb_io_meta_read_bits_idx; // @[dcache.scala:431:18] wire [19:0] _wb_io_meta_read_bits_tag; // @[dcache.scala:431:18] wire _wb_io_resp; // @[dcache.scala:431:18] wire _wb_io_idx_valid; // @[dcache.scala:431:18] wire [5:0] _wb_io_idx_bits; // @[dcache.scala:431:18] wire _wb_io_data_req_valid; // @[dcache.scala:431:18] wire [3:0] _wb_io_data_req_bits_way_en; // @[dcache.scala:431:18] wire [11:0] _wb_io_data_req_bits_addr; // @[dcache.scala:431:18] wire _wb_io_release_valid; // @[dcache.scala:431:18] wire [2:0] _wb_io_release_bits_opcode; // @[dcache.scala:431:18] wire [2:0] _wb_io_release_bits_param; // @[dcache.scala:431:18] wire [31:0] _wb_io_release_bits_address; // @[dcache.scala:431:18] wire [63:0] _wb_io_release_bits_data; // @[dcache.scala:431:18] wire _wb_io_lsu_release_valid; // @[dcache.scala:431:18] wire [2:0] _wb_io_lsu_release_bits_param; // @[dcache.scala:431:18] wire [31:0] _wb_io_lsu_release_bits_address; // @[dcache.scala:431:18] wire [63:0] _wb_io_lsu_release_bits_data; // @[dcache.scala:431:18] wire auto_out_a_ready_0 = auto_out_a_ready; // @[dcache.scala:413:7] wire auto_out_b_valid_0 = auto_out_b_valid; // @[dcache.scala:413:7] wire [2:0] auto_out_b_bits_opcode_0 = auto_out_b_bits_opcode; // @[dcache.scala:413:7] wire [1:0] auto_out_b_bits_param_0 = auto_out_b_bits_param; // @[dcache.scala:413:7] wire [3:0] auto_out_b_bits_size_0 = auto_out_b_bits_size; // @[dcache.scala:413:7] wire [1:0] auto_out_b_bits_source_0 = auto_out_b_bits_source; // @[dcache.scala:413:7] wire [31:0] auto_out_b_bits_address_0 = auto_out_b_bits_address; // @[dcache.scala:413:7] wire [7:0] auto_out_b_bits_mask_0 = auto_out_b_bits_mask; // @[dcache.scala:413:7] wire [63:0] auto_out_b_bits_data_0 = auto_out_b_bits_data; // @[dcache.scala:413:7] wire auto_out_b_bits_corrupt_0 = auto_out_b_bits_corrupt; // @[dcache.scala:413:7] wire auto_out_c_ready_0 = auto_out_c_ready; // @[dcache.scala:413:7] wire auto_out_d_valid_0 = auto_out_d_valid; // @[dcache.scala:413:7] wire [2:0] auto_out_d_bits_opcode_0 = auto_out_d_bits_opcode; // @[dcache.scala:413:7] wire [1:0] auto_out_d_bits_param_0 = auto_out_d_bits_param; // @[dcache.scala:413:7] wire [3:0] auto_out_d_bits_size_0 = auto_out_d_bits_size; // @[dcache.scala:413:7] wire [1:0] auto_out_d_bits_source_0 = auto_out_d_bits_source; // @[dcache.scala:413:7] wire [2:0] auto_out_d_bits_sink_0 = auto_out_d_bits_sink; // @[dcache.scala:413:7] wire auto_out_d_bits_denied_0 = 
auto_out_d_bits_denied; // @[dcache.scala:413:7] wire [63:0] auto_out_d_bits_data_0 = auto_out_d_bits_data; // @[dcache.scala:413:7] wire auto_out_d_bits_corrupt_0 = auto_out_d_bits_corrupt; // @[dcache.scala:413:7] wire auto_out_e_ready_0 = auto_out_e_ready; // @[dcache.scala:413:7] wire io_lsu_req_valid_0 = io_lsu_req_valid; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_valid_0 = io_lsu_req_bits_0_valid; // @[dcache.scala:413:7] wire [6:0] io_lsu_req_bits_0_bits_uop_uopc_0 = io_lsu_req_bits_0_bits_uop_uopc; // @[dcache.scala:413:7] wire [31:0] io_lsu_req_bits_0_bits_uop_inst_0 = io_lsu_req_bits_0_bits_uop_inst; // @[dcache.scala:413:7] wire [31:0] io_lsu_req_bits_0_bits_uop_debug_inst_0 = io_lsu_req_bits_0_bits_uop_debug_inst; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_rvc_0 = io_lsu_req_bits_0_bits_uop_is_rvc; // @[dcache.scala:413:7] wire [39:0] io_lsu_req_bits_0_bits_uop_debug_pc_0 = io_lsu_req_bits_0_bits_uop_debug_pc; // @[dcache.scala:413:7] wire [2:0] io_lsu_req_bits_0_bits_uop_iq_type_0 = io_lsu_req_bits_0_bits_uop_iq_type; // @[dcache.scala:413:7] wire [9:0] io_lsu_req_bits_0_bits_uop_fu_code_0 = io_lsu_req_bits_0_bits_uop_fu_code; // @[dcache.scala:413:7] wire [3:0] io_lsu_req_bits_0_bits_uop_ctrl_br_type_0 = io_lsu_req_bits_0_bits_uop_ctrl_br_type; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_ctrl_op1_sel_0 = io_lsu_req_bits_0_bits_uop_ctrl_op1_sel; // @[dcache.scala:413:7] wire [2:0] io_lsu_req_bits_0_bits_uop_ctrl_op2_sel_0 = io_lsu_req_bits_0_bits_uop_ctrl_op2_sel; // @[dcache.scala:413:7] wire [2:0] io_lsu_req_bits_0_bits_uop_ctrl_imm_sel_0 = io_lsu_req_bits_0_bits_uop_ctrl_imm_sel; // @[dcache.scala:413:7] wire [4:0] io_lsu_req_bits_0_bits_uop_ctrl_op_fcn_0 = io_lsu_req_bits_0_bits_uop_ctrl_op_fcn; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_ctrl_fcn_dw_0 = io_lsu_req_bits_0_bits_uop_ctrl_fcn_dw; // @[dcache.scala:413:7] wire [2:0] io_lsu_req_bits_0_bits_uop_ctrl_csr_cmd_0 = io_lsu_req_bits_0_bits_uop_ctrl_csr_cmd; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_ctrl_is_load_0 = io_lsu_req_bits_0_bits_uop_ctrl_is_load; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_ctrl_is_sta_0 = io_lsu_req_bits_0_bits_uop_ctrl_is_sta; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_ctrl_is_std_0 = io_lsu_req_bits_0_bits_uop_ctrl_is_std; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_iw_state_0 = io_lsu_req_bits_0_bits_uop_iw_state; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_iw_p1_poisoned_0 = io_lsu_req_bits_0_bits_uop_iw_p1_poisoned; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_iw_p2_poisoned_0 = io_lsu_req_bits_0_bits_uop_iw_p2_poisoned; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_br_0 = io_lsu_req_bits_0_bits_uop_is_br; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_jalr_0 = io_lsu_req_bits_0_bits_uop_is_jalr; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_jal_0 = io_lsu_req_bits_0_bits_uop_is_jal; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_sfb_0 = io_lsu_req_bits_0_bits_uop_is_sfb; // @[dcache.scala:413:7] wire [7:0] io_lsu_req_bits_0_bits_uop_br_mask_0 = io_lsu_req_bits_0_bits_uop_br_mask; // @[dcache.scala:413:7] wire [2:0] io_lsu_req_bits_0_bits_uop_br_tag_0 = io_lsu_req_bits_0_bits_uop_br_tag; // @[dcache.scala:413:7] wire [3:0] io_lsu_req_bits_0_bits_uop_ftq_idx_0 = io_lsu_req_bits_0_bits_uop_ftq_idx; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_edge_inst_0 = 
io_lsu_req_bits_0_bits_uop_edge_inst; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_pc_lob_0 = io_lsu_req_bits_0_bits_uop_pc_lob; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_taken_0 = io_lsu_req_bits_0_bits_uop_taken; // @[dcache.scala:413:7] wire [19:0] io_lsu_req_bits_0_bits_uop_imm_packed_0 = io_lsu_req_bits_0_bits_uop_imm_packed; // @[dcache.scala:413:7] wire [11:0] io_lsu_req_bits_0_bits_uop_csr_addr_0 = io_lsu_req_bits_0_bits_uop_csr_addr; // @[dcache.scala:413:7] wire [4:0] io_lsu_req_bits_0_bits_uop_rob_idx_0 = io_lsu_req_bits_0_bits_uop_rob_idx; // @[dcache.scala:413:7] wire [2:0] io_lsu_req_bits_0_bits_uop_ldq_idx_0 = io_lsu_req_bits_0_bits_uop_ldq_idx; // @[dcache.scala:413:7] wire [2:0] io_lsu_req_bits_0_bits_uop_stq_idx_0 = io_lsu_req_bits_0_bits_uop_stq_idx; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_rxq_idx_0 = io_lsu_req_bits_0_bits_uop_rxq_idx; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_pdst_0 = io_lsu_req_bits_0_bits_uop_pdst; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_prs1_0 = io_lsu_req_bits_0_bits_uop_prs1; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_prs2_0 = io_lsu_req_bits_0_bits_uop_prs2; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_prs3_0 = io_lsu_req_bits_0_bits_uop_prs3; // @[dcache.scala:413:7] wire [3:0] io_lsu_req_bits_0_bits_uop_ppred_0 = io_lsu_req_bits_0_bits_uop_ppred; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_prs1_busy_0 = io_lsu_req_bits_0_bits_uop_prs1_busy; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_prs2_busy_0 = io_lsu_req_bits_0_bits_uop_prs2_busy; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_prs3_busy_0 = io_lsu_req_bits_0_bits_uop_prs3_busy; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_ppred_busy_0 = io_lsu_req_bits_0_bits_uop_ppred_busy; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_stale_pdst_0 = io_lsu_req_bits_0_bits_uop_stale_pdst; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_exception_0 = io_lsu_req_bits_0_bits_uop_exception; // @[dcache.scala:413:7] wire [63:0] io_lsu_req_bits_0_bits_uop_exc_cause_0 = io_lsu_req_bits_0_bits_uop_exc_cause; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_bypassable_0 = io_lsu_req_bits_0_bits_uop_bypassable; // @[dcache.scala:413:7] wire [4:0] io_lsu_req_bits_0_bits_uop_mem_cmd_0 = io_lsu_req_bits_0_bits_uop_mem_cmd; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_mem_size_0 = io_lsu_req_bits_0_bits_uop_mem_size; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_mem_signed_0 = io_lsu_req_bits_0_bits_uop_mem_signed; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_fence_0 = io_lsu_req_bits_0_bits_uop_is_fence; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_fencei_0 = io_lsu_req_bits_0_bits_uop_is_fencei; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_amo_0 = io_lsu_req_bits_0_bits_uop_is_amo; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_uses_ldq_0 = io_lsu_req_bits_0_bits_uop_uses_ldq; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_uses_stq_0 = io_lsu_req_bits_0_bits_uop_uses_stq; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_sys_pc2epc_0 = io_lsu_req_bits_0_bits_uop_is_sys_pc2epc; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_is_unique_0 = io_lsu_req_bits_0_bits_uop_is_unique; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_flush_on_commit_0 = 
io_lsu_req_bits_0_bits_uop_flush_on_commit; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_ldst_is_rs1_0 = io_lsu_req_bits_0_bits_uop_ldst_is_rs1; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_ldst_0 = io_lsu_req_bits_0_bits_uop_ldst; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_lrs1_0 = io_lsu_req_bits_0_bits_uop_lrs1; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_lrs2_0 = io_lsu_req_bits_0_bits_uop_lrs2; // @[dcache.scala:413:7] wire [5:0] io_lsu_req_bits_0_bits_uop_lrs3_0 = io_lsu_req_bits_0_bits_uop_lrs3; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_ldst_val_0 = io_lsu_req_bits_0_bits_uop_ldst_val; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_dst_rtype_0 = io_lsu_req_bits_0_bits_uop_dst_rtype; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_lrs1_rtype_0 = io_lsu_req_bits_0_bits_uop_lrs1_rtype; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_lrs2_rtype_0 = io_lsu_req_bits_0_bits_uop_lrs2_rtype; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_frs3_en_0 = io_lsu_req_bits_0_bits_uop_frs3_en; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_fp_val_0 = io_lsu_req_bits_0_bits_uop_fp_val; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_fp_single_0 = io_lsu_req_bits_0_bits_uop_fp_single; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_xcpt_pf_if_0 = io_lsu_req_bits_0_bits_uop_xcpt_pf_if; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_xcpt_ae_if_0 = io_lsu_req_bits_0_bits_uop_xcpt_ae_if; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_xcpt_ma_if_0 = io_lsu_req_bits_0_bits_uop_xcpt_ma_if; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_bp_debug_if_0 = io_lsu_req_bits_0_bits_uop_bp_debug_if; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_uop_bp_xcpt_if_0 = io_lsu_req_bits_0_bits_uop_bp_xcpt_if; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_debug_fsrc_0 = io_lsu_req_bits_0_bits_uop_debug_fsrc; // @[dcache.scala:413:7] wire [1:0] io_lsu_req_bits_0_bits_uop_debug_tsrc_0 = io_lsu_req_bits_0_bits_uop_debug_tsrc; // @[dcache.scala:413:7] wire [39:0] io_lsu_req_bits_0_bits_addr_0 = io_lsu_req_bits_0_bits_addr; // @[dcache.scala:413:7] wire [63:0] io_lsu_req_bits_0_bits_data_0 = io_lsu_req_bits_0_bits_data; // @[dcache.scala:413:7] wire io_lsu_req_bits_0_bits_is_hella_0 = io_lsu_req_bits_0_bits_is_hella; // @[dcache.scala:413:7] wire io_lsu_s1_kill_0_0 = io_lsu_s1_kill_0; // @[dcache.scala:413:7] wire [7:0] io_lsu_brupdate_b1_resolve_mask_0 = io_lsu_brupdate_b1_resolve_mask; // @[dcache.scala:413:7] wire [7:0] io_lsu_brupdate_b1_mispredict_mask_0 = io_lsu_brupdate_b1_mispredict_mask; // @[dcache.scala:413:7] wire [6:0] io_lsu_brupdate_b2_uop_uopc_0 = io_lsu_brupdate_b2_uop_uopc; // @[dcache.scala:413:7] wire [31:0] io_lsu_brupdate_b2_uop_inst_0 = io_lsu_brupdate_b2_uop_inst; // @[dcache.scala:413:7] wire [31:0] io_lsu_brupdate_b2_uop_debug_inst_0 = io_lsu_brupdate_b2_uop_debug_inst; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_rvc_0 = io_lsu_brupdate_b2_uop_is_rvc; // @[dcache.scala:413:7] wire [39:0] io_lsu_brupdate_b2_uop_debug_pc_0 = io_lsu_brupdate_b2_uop_debug_pc; // @[dcache.scala:413:7] wire [2:0] io_lsu_brupdate_b2_uop_iq_type_0 = io_lsu_brupdate_b2_uop_iq_type; // @[dcache.scala:413:7] wire [9:0] io_lsu_brupdate_b2_uop_fu_code_0 = io_lsu_brupdate_b2_uop_fu_code; // @[dcache.scala:413:7] wire [3:0] io_lsu_brupdate_b2_uop_ctrl_br_type_0 = io_lsu_brupdate_b2_uop_ctrl_br_type; // 
@[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_ctrl_op1_sel_0 = io_lsu_brupdate_b2_uop_ctrl_op1_sel; // @[dcache.scala:413:7] wire [2:0] io_lsu_brupdate_b2_uop_ctrl_op2_sel_0 = io_lsu_brupdate_b2_uop_ctrl_op2_sel; // @[dcache.scala:413:7] wire [2:0] io_lsu_brupdate_b2_uop_ctrl_imm_sel_0 = io_lsu_brupdate_b2_uop_ctrl_imm_sel; // @[dcache.scala:413:7] wire [4:0] io_lsu_brupdate_b2_uop_ctrl_op_fcn_0 = io_lsu_brupdate_b2_uop_ctrl_op_fcn; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_ctrl_fcn_dw_0 = io_lsu_brupdate_b2_uop_ctrl_fcn_dw; // @[dcache.scala:413:7] wire [2:0] io_lsu_brupdate_b2_uop_ctrl_csr_cmd_0 = io_lsu_brupdate_b2_uop_ctrl_csr_cmd; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_ctrl_is_load_0 = io_lsu_brupdate_b2_uop_ctrl_is_load; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_ctrl_is_sta_0 = io_lsu_brupdate_b2_uop_ctrl_is_sta; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_ctrl_is_std_0 = io_lsu_brupdate_b2_uop_ctrl_is_std; // @[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_iw_state_0 = io_lsu_brupdate_b2_uop_iw_state; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_iw_p1_poisoned_0 = io_lsu_brupdate_b2_uop_iw_p1_poisoned; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_iw_p2_poisoned_0 = io_lsu_brupdate_b2_uop_iw_p2_poisoned; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_br_0 = io_lsu_brupdate_b2_uop_is_br; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_jalr_0 = io_lsu_brupdate_b2_uop_is_jalr; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_jal_0 = io_lsu_brupdate_b2_uop_is_jal; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_sfb_0 = io_lsu_brupdate_b2_uop_is_sfb; // @[dcache.scala:413:7] wire [7:0] io_lsu_brupdate_b2_uop_br_mask_0 = io_lsu_brupdate_b2_uop_br_mask; // @[dcache.scala:413:7] wire [2:0] io_lsu_brupdate_b2_uop_br_tag_0 = io_lsu_brupdate_b2_uop_br_tag; // @[dcache.scala:413:7] wire [3:0] io_lsu_brupdate_b2_uop_ftq_idx_0 = io_lsu_brupdate_b2_uop_ftq_idx; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_edge_inst_0 = io_lsu_brupdate_b2_uop_edge_inst; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_pc_lob_0 = io_lsu_brupdate_b2_uop_pc_lob; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_taken_0 = io_lsu_brupdate_b2_uop_taken; // @[dcache.scala:413:7] wire [19:0] io_lsu_brupdate_b2_uop_imm_packed_0 = io_lsu_brupdate_b2_uop_imm_packed; // @[dcache.scala:413:7] wire [11:0] io_lsu_brupdate_b2_uop_csr_addr_0 = io_lsu_brupdate_b2_uop_csr_addr; // @[dcache.scala:413:7] wire [4:0] io_lsu_brupdate_b2_uop_rob_idx_0 = io_lsu_brupdate_b2_uop_rob_idx; // @[dcache.scala:413:7] wire [2:0] io_lsu_brupdate_b2_uop_ldq_idx_0 = io_lsu_brupdate_b2_uop_ldq_idx; // @[dcache.scala:413:7] wire [2:0] io_lsu_brupdate_b2_uop_stq_idx_0 = io_lsu_brupdate_b2_uop_stq_idx; // @[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_rxq_idx_0 = io_lsu_brupdate_b2_uop_rxq_idx; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_pdst_0 = io_lsu_brupdate_b2_uop_pdst; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_prs1_0 = io_lsu_brupdate_b2_uop_prs1; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_prs2_0 = io_lsu_brupdate_b2_uop_prs2; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_prs3_0 = io_lsu_brupdate_b2_uop_prs3; // @[dcache.scala:413:7] wire [3:0] io_lsu_brupdate_b2_uop_ppred_0 = io_lsu_brupdate_b2_uop_ppred; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_prs1_busy_0 = io_lsu_brupdate_b2_uop_prs1_busy; // @[dcache.scala:413:7] wire 
io_lsu_brupdate_b2_uop_prs2_busy_0 = io_lsu_brupdate_b2_uop_prs2_busy; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_prs3_busy_0 = io_lsu_brupdate_b2_uop_prs3_busy; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_ppred_busy_0 = io_lsu_brupdate_b2_uop_ppred_busy; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_stale_pdst_0 = io_lsu_brupdate_b2_uop_stale_pdst; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_exception_0 = io_lsu_brupdate_b2_uop_exception; // @[dcache.scala:413:7] wire [63:0] io_lsu_brupdate_b2_uop_exc_cause_0 = io_lsu_brupdate_b2_uop_exc_cause; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_bypassable_0 = io_lsu_brupdate_b2_uop_bypassable; // @[dcache.scala:413:7] wire [4:0] io_lsu_brupdate_b2_uop_mem_cmd_0 = io_lsu_brupdate_b2_uop_mem_cmd; // @[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_mem_size_0 = io_lsu_brupdate_b2_uop_mem_size; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_mem_signed_0 = io_lsu_brupdate_b2_uop_mem_signed; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_fence_0 = io_lsu_brupdate_b2_uop_is_fence; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_fencei_0 = io_lsu_brupdate_b2_uop_is_fencei; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_amo_0 = io_lsu_brupdate_b2_uop_is_amo; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_uses_ldq_0 = io_lsu_brupdate_b2_uop_uses_ldq; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_uses_stq_0 = io_lsu_brupdate_b2_uop_uses_stq; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_sys_pc2epc_0 = io_lsu_brupdate_b2_uop_is_sys_pc2epc; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_is_unique_0 = io_lsu_brupdate_b2_uop_is_unique; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_flush_on_commit_0 = io_lsu_brupdate_b2_uop_flush_on_commit; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_ldst_is_rs1_0 = io_lsu_brupdate_b2_uop_ldst_is_rs1; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_ldst_0 = io_lsu_brupdate_b2_uop_ldst; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_lrs1_0 = io_lsu_brupdate_b2_uop_lrs1; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_lrs2_0 = io_lsu_brupdate_b2_uop_lrs2; // @[dcache.scala:413:7] wire [5:0] io_lsu_brupdate_b2_uop_lrs3_0 = io_lsu_brupdate_b2_uop_lrs3; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_ldst_val_0 = io_lsu_brupdate_b2_uop_ldst_val; // @[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_dst_rtype_0 = io_lsu_brupdate_b2_uop_dst_rtype; // @[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_lrs1_rtype_0 = io_lsu_brupdate_b2_uop_lrs1_rtype; // @[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_lrs2_rtype_0 = io_lsu_brupdate_b2_uop_lrs2_rtype; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_frs3_en_0 = io_lsu_brupdate_b2_uop_frs3_en; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_fp_val_0 = io_lsu_brupdate_b2_uop_fp_val; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_fp_single_0 = io_lsu_brupdate_b2_uop_fp_single; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_xcpt_pf_if_0 = io_lsu_brupdate_b2_uop_xcpt_pf_if; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_xcpt_ae_if_0 = io_lsu_brupdate_b2_uop_xcpt_ae_if; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_xcpt_ma_if_0 = io_lsu_brupdate_b2_uop_xcpt_ma_if; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_bp_debug_if_0 = io_lsu_brupdate_b2_uop_bp_debug_if; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_uop_bp_xcpt_if_0 = io_lsu_brupdate_b2_uop_bp_xcpt_if; // 
@[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_debug_fsrc_0 = io_lsu_brupdate_b2_uop_debug_fsrc; // @[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_uop_debug_tsrc_0 = io_lsu_brupdate_b2_uop_debug_tsrc; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_valid_0 = io_lsu_brupdate_b2_valid; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_mispredict_0 = io_lsu_brupdate_b2_mispredict; // @[dcache.scala:413:7] wire io_lsu_brupdate_b2_taken_0 = io_lsu_brupdate_b2_taken; // @[dcache.scala:413:7] wire [2:0] io_lsu_brupdate_b2_cfi_type_0 = io_lsu_brupdate_b2_cfi_type; // @[dcache.scala:413:7] wire [1:0] io_lsu_brupdate_b2_pc_sel_0 = io_lsu_brupdate_b2_pc_sel; // @[dcache.scala:413:7] wire [39:0] io_lsu_brupdate_b2_jalr_target_0 = io_lsu_brupdate_b2_jalr_target; // @[dcache.scala:413:7] wire [20:0] io_lsu_brupdate_b2_target_offset_0 = io_lsu_brupdate_b2_target_offset; // @[dcache.scala:413:7] wire io_lsu_exception_0 = io_lsu_exception; // @[dcache.scala:413:7] wire [4:0] io_lsu_rob_pnr_idx_0 = io_lsu_rob_pnr_idx; // @[dcache.scala:413:7] wire [4:0] io_lsu_rob_head_idx_0 = io_lsu_rob_head_idx; // @[dcache.scala:413:7] wire io_lsu_release_ready_0 = io_lsu_release_ready; // @[dcache.scala:413:7] wire io_lsu_force_order_0 = io_lsu_force_order; // @[dcache.scala:413:7] wire auto_out_a_bits_corrupt = 1'h0; // @[dcache.scala:413:7] wire auto_out_c_bits_corrupt = 1'h0; // @[dcache.scala:413:7] wire io_lsu_release_bits_corrupt = 1'h0; // @[dcache.scala:413:7] wire nodeOut_a_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire nodeOut_c_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire mshr_read_req_0_uop_is_rvc = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_ctrl_fcn_dw = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_ctrl_is_load = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_ctrl_is_sta = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_ctrl_is_std = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_iw_p1_poisoned = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_iw_p2_poisoned = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_br = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_jalr = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_jal = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_sfb = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_edge_inst = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_taken = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_prs1_busy = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_prs2_busy = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_prs3_busy = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_ppred_busy = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_exception = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_bypassable = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_mem_signed = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_fence = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_fencei = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_amo = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_uses_ldq = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_uses_stq = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_is_unique = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_flush_on_commit = 1'h0; // @[dcache.scala:517:27] wire 
mshr_read_req_0_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_ldst_val = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_frs3_en = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_fp_val = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_fp_single = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_bp_debug_if = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_is_hella = 1'h0; // @[dcache.scala:517:27] wire mshr_read_req_0_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_br = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_taken = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_exception = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19] wire mshr_read_req_0_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19] wire 
mshr_read_req_0_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18] wire mshr_read_req_0_uop_cs_is_load = 1'h0; // @[consts.scala:279:18] wire mshr_read_req_0_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18] wire mshr_read_req_0_uop_cs_is_std = 1'h0; // @[consts.scala:279:18] wire wb_req_0_uop_is_rvc = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_ctrl_fcn_dw = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_ctrl_is_load = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_ctrl_is_sta = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_ctrl_is_std = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_iw_p1_poisoned = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_iw_p2_poisoned = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_br = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_jalr = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_jal = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_sfb = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_edge_inst = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_taken = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_prs1_busy = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_prs2_busy = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_prs3_busy = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_ppred_busy = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_exception = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_bypassable = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_mem_signed = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_fence = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_fencei = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_amo = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_uses_ldq = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_uses_stq = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_is_unique = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_flush_on_commit = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_ldst_val = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_frs3_en = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_fp_val = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_fp_single = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_bp_debug_if = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_is_hella = 1'h0; // @[dcache.scala:532:20] wire wb_req_0_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_br = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_taken = 1'h0; // @[consts.scala:269:19] 
wire wb_req_0_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_exception = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19] wire wb_req_0_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18] wire wb_req_0_uop_cs_is_load = 1'h0; // @[consts.scala:279:18] wire wb_req_0_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18] wire wb_req_0_uop_cs_is_std = 1'h0; // @[consts.scala:279:18] wire prober_req_0_uop_is_rvc = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_ctrl_fcn_dw = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_ctrl_is_load = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_ctrl_is_sta = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_ctrl_is_std = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_iw_p1_poisoned = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_iw_p2_poisoned = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_is_br = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_is_jalr = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_is_jal = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_is_sfb = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_edge_inst = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_taken = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_prs1_busy = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_prs2_busy = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_prs3_busy = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_ppred_busy = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_exception = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_bypassable = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_mem_signed = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_is_fence = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_is_fencei = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_is_amo = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_uses_ldq = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_uses_stq = 1'h0; // @[dcache.scala:553:26] 
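// Note (added for readability; not part of the emitted netlist conventions): the wires tied to
// 1'h0 in this region are MicroOp sub-fields of the internal mshr_read_req, wb_req, prober_req,
// and prefetch_req request bundles that only ever carry constant values in dcache.scala, so the
// Chisel-to-Verilog flow emits them as literal constant assignments rather than driven nets.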
wire prober_req_0_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_is_unique = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_flush_on_commit = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_ldst_val = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_frs3_en = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_fp_val = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_fp_single = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_bp_debug_if = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_is_hella = 1'h0; // @[dcache.scala:553:26] wire prober_req_0_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_br = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_taken = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_exception = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_bypassable = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19] wire 
prober_req_0_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19] wire prober_req_0_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18] wire prober_req_0_uop_cs_is_load = 1'h0; // @[consts.scala:279:18] wire prober_req_0_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18] wire prober_req_0_uop_cs_is_std = 1'h0; // @[consts.scala:279:18] wire prefetch_fire = 1'h0; // @[Decoupled.scala:51:35] wire prefetch_req_0_uop_is_rvc = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_ctrl_fcn_dw = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_ctrl_is_load = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_ctrl_is_sta = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_ctrl_is_std = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_iw_p1_poisoned = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_iw_p2_poisoned = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_br = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_jalr = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_jal = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_sfb = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_edge_inst = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_taken = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_prs1_busy = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_prs2_busy = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_prs3_busy = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_ppred_busy = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_exception = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_bypassable = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_mem_signed = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_fence = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_fencei = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_amo = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_uses_ldq = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_uses_stq = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_sys_pc2epc = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_is_unique = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_flush_on_commit = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_ldst_is_rs1 = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_ldst_val = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_frs3_en = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_fp_val = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_fp_single = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_xcpt_pf_if = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_xcpt_ae_if = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_xcpt_ma_if = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_bp_debug_if = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_uop_bp_xcpt_if = 1'h0; // @[dcache.scala:568:27] wire prefetch_req_0_is_hella = 1'h0; // @[dcache.scala:568:27] wire _s0_valid_WIRE_2_0 = 1'h0; // @[dcache.scala:581:82] wire _s2_has_permission_r_T_26 = 1'h0; // @[Misc.scala:35:9] wire _s2_has_permission_r_T_29 = 1'h0; // @[Misc.scala:35:9] wire _s2_has_permission_r_T_32 = 1'h0; // @[Misc.scala:35:9] wire _s2_has_permission_r_T_35 = 1'h0; // @[Misc.scala:35:9] wire _s2_has_permission_r_T_38 = 1'h0; // @[Misc.scala:35:9] wire _s2_new_hit_state_r_T_26 = 1'h0; // @[Misc.scala:35:9] wire _s2_new_hit_state_r_T_29 = 1'h0; // @[Misc.scala:35:9] wire _s2_new_hit_state_r_T_32 
= 1'h0; // @[Misc.scala:35:9] wire _s2_new_hit_state_r_T_35 = 1'h0; // @[Misc.scala:35:9] wire _s2_new_hit_state_r_T_38 = 1'h0; // @[Misc.scala:35:9] wire s2_word_idx_0 = 1'h0; // @[dcache.scala:427:49] wire s2_nack_data_0 = 1'h0; // @[dcache.scala:427:49] wire opdata_1 = 1'h0; // @[Edges.scala:102:36] wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34] wire _nodeOut_c_bits_WIRE_corrupt = 1'h0; // @[Mux.scala:30:73] wire _nodeOut_c_bits_T = 1'h0; // @[Mux.scala:30:73] wire _nodeOut_c_bits_T_1 = 1'h0; // @[Mux.scala:30:73] wire _nodeOut_c_bits_T_2 = 1'h0; // @[Mux.scala:30:73] wire _nodeOut_c_bits_WIRE_1 = 1'h0; // @[Mux.scala:30:73] wire cache_resp_0_bits_data_doZero = 1'h0; // @[AMOALU.scala:43:31] wire cache_resp_0_bits_data_doZero_1 = 1'h0; // @[AMOALU.scala:43:31] wire _s0_valid_WIRE_1_0 = 1'h1; // @[dcache.scala:581:48] wire _mshrs_io_req_0_valid_T_6 = 1'h1; // @[dcache.scala:750:29] wire _uncache_respond_T_1 = 1'h1; // @[dcache.scala:849:51] wire [6:0] mshr_read_req_0_uop_uopc = 7'h0; // @[dcache.scala:517:27] wire [6:0] mshr_read_req_0_uop_uop_uopc = 7'h0; // @[consts.scala:269:19] wire [6:0] wb_req_0_uop_uopc = 7'h0; // @[dcache.scala:532:20] wire [6:0] wb_req_0_uop_uop_uopc = 7'h0; // @[consts.scala:269:19] wire [6:0] prober_req_0_uop_uopc = 7'h0; // @[dcache.scala:553:26] wire [6:0] prober_req_0_uop_uop_uopc = 7'h0; // @[consts.scala:269:19] wire [6:0] prefetch_req_0_uop_uopc = 7'h0; // @[dcache.scala:568:27] wire [6:0] _s2_data_word_prebypass_T = 7'h0; // @[dcache.scala:825:69] wire [63:0] mshr_read_req_0_uop_exc_cause = 64'h0; // @[dcache.scala:517:27] wire [63:0] mshr_read_req_0_data = 64'h0; // @[dcache.scala:517:27] wire [63:0] mshr_read_req_0_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19] wire [63:0] wb_req_0_uop_exc_cause = 64'h0; // @[dcache.scala:532:20] wire [63:0] wb_req_0_data = 64'h0; // @[dcache.scala:532:20] wire [63:0] wb_req_0_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19] wire [63:0] prober_req_0_uop_exc_cause = 64'h0; // @[dcache.scala:553:26] wire [63:0] prober_req_0_data = 64'h0; // @[dcache.scala:553:26] wire [63:0] prober_req_0_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19] wire [63:0] prefetch_req_0_uop_exc_cause = 64'h0; // @[dcache.scala:568:27] wire [63:0] prefetch_req_0_data = 64'h0; // @[dcache.scala:568:27] wire [63:0] _nodeOut_c_bits_T_4 = 64'h0; // @[Mux.scala:30:73] wire [8:0] maskedBeats_1 = 9'h0; // @[Arbiter.scala:82:69] wire [8:0] decode = 9'h7; // @[Edges.scala:220:59] wire [11:0] _decode_T_2 = 12'h3F; // @[package.scala:243:46] wire [11:0] _decode_T_1 = 12'hFC0; // @[package.scala:243:76] wire [26:0] _decode_T = 27'h3FFC0; // @[package.scala:243:71] wire [1:0] mshr_read_req_0_uop_dst_rtype = 2'h2; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_dst_rtype = 2'h2; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_dst_rtype = 2'h2; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19] wire [3:0] _s2_has_permission_r_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _s2_new_hit_state_r_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _s2_has_permission_r_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _s2_new_hit_state_r_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _s2_has_permission_r_T_22 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _s2_new_hit_state_r_T_22 = 
4'hD; // @[Metadata.scala:71:10] wire [3:0] _s2_has_permission_r_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _s2_new_hit_state_r_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _s2_has_permission_r_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _s2_new_hit_state_r_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] mshr_read_req_0_uop_ctrl_br_type = 4'h0; // @[dcache.scala:517:27] wire [3:0] mshr_read_req_0_uop_ftq_idx = 4'h0; // @[dcache.scala:517:27] wire [3:0] mshr_read_req_0_uop_ppred = 4'h0; // @[dcache.scala:517:27] wire [3:0] mshr_read_req_0_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19] wire [3:0] mshr_read_req_0_uop_uop_ftq_idx = 4'h0; // @[consts.scala:269:19] wire [3:0] mshr_read_req_0_uop_uop_ppred = 4'h0; // @[consts.scala:269:19] wire [3:0] mshr_read_req_0_uop_cs_br_type = 4'h0; // @[consts.scala:279:18] wire [3:0] wb_req_0_uop_ctrl_br_type = 4'h0; // @[dcache.scala:532:20] wire [3:0] wb_req_0_uop_ftq_idx = 4'h0; // @[dcache.scala:532:20] wire [3:0] wb_req_0_uop_ppred = 4'h0; // @[dcache.scala:532:20] wire [3:0] wb_req_0_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19] wire [3:0] wb_req_0_uop_uop_ftq_idx = 4'h0; // @[consts.scala:269:19] wire [3:0] wb_req_0_uop_uop_ppred = 4'h0; // @[consts.scala:269:19] wire [3:0] wb_req_0_uop_cs_br_type = 4'h0; // @[consts.scala:279:18] wire [3:0] prober_req_0_uop_ctrl_br_type = 4'h0; // @[dcache.scala:553:26] wire [3:0] prober_req_0_uop_ftq_idx = 4'h0; // @[dcache.scala:553:26] wire [3:0] prober_req_0_uop_ppred = 4'h0; // @[dcache.scala:553:26] wire [3:0] prober_req_0_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19] wire [3:0] prober_req_0_uop_uop_ftq_idx = 4'h0; // @[consts.scala:269:19] wire [3:0] prober_req_0_uop_uop_ppred = 4'h0; // @[consts.scala:269:19] wire [3:0] prober_req_0_uop_cs_br_type = 4'h0; // @[consts.scala:279:18] wire [3:0] prefetch_req_0_uop_ctrl_br_type = 4'h0; // @[dcache.scala:568:27] wire [3:0] prefetch_req_0_uop_ftq_idx = 4'h0; // @[dcache.scala:568:27] wire [3:0] prefetch_req_0_uop_ppred = 4'h0; // @[dcache.scala:568:27] wire [3:0] _s2_has_permission_r_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _s2_new_hit_state_r_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [1:0] mshr_read_req_0_uop_ctrl_op1_sel = 2'h0; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_iw_state = 2'h0; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_rxq_idx = 2'h0; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_mem_size = 2'h0; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_lrs1_rtype = 2'h0; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_lrs2_rtype = 2'h0; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_debug_fsrc = 2'h0; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_debug_tsrc = 2'h0; // @[dcache.scala:517:27] wire [1:0] mshr_read_req_0_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19] wire [1:0] mshr_read_req_0_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19] wire [1:0] mshr_read_req_0_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19] wire [1:0] mshr_read_req_0_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19] wire [1:0] mshr_read_req_0_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] mshr_read_req_0_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] mshr_read_req_0_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] mshr_read_req_0_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] mshr_read_req_0_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18] wire [1:0] 
wb_req_0_uop_ctrl_op1_sel = 2'h0; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_iw_state = 2'h0; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_rxq_idx = 2'h0; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_mem_size = 2'h0; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_lrs1_rtype = 2'h0; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_lrs2_rtype = 2'h0; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_debug_fsrc = 2'h0; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_debug_tsrc = 2'h0; // @[dcache.scala:532:20] wire [1:0] wb_req_0_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] wb_req_0_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18] wire [1:0] prober_req_0_uop_ctrl_op1_sel = 2'h0; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_iw_state = 2'h0; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_rxq_idx = 2'h0; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_mem_size = 2'h0; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_lrs1_rtype = 2'h0; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_lrs2_rtype = 2'h0; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_debug_fsrc = 2'h0; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_debug_tsrc = 2'h0; // @[dcache.scala:553:26] wire [1:0] prober_req_0_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] prober_req_0_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18] wire [1:0] prefetch_req_0_uop_ctrl_op1_sel = 2'h0; // @[dcache.scala:568:27] wire [1:0] prefetch_req_0_uop_iw_state = 2'h0; // @[dcache.scala:568:27] wire [1:0] prefetch_req_0_uop_rxq_idx = 2'h0; // @[dcache.scala:568:27] wire [1:0] prefetch_req_0_uop_mem_size = 2'h0; // @[dcache.scala:568:27] wire [1:0] prefetch_req_0_uop_dst_rtype = 2'h0; // @[dcache.scala:568:27] wire [1:0] prefetch_req_0_uop_lrs1_rtype = 2'h0; // @[dcache.scala:568:27] wire [1:0] prefetch_req_0_uop_lrs2_rtype = 2'h0; // @[dcache.scala:568:27] wire [1:0] prefetch_req_0_uop_debug_fsrc = 2'h0; // @[dcache.scala:568:27] wire [1:0] prefetch_req_0_uop_debug_tsrc = 2'h0; // @[dcache.scala:568:27] wire [1:0] _s2_has_permission_r_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _s2_has_permission_r_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _s2_has_permission_r_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _s2_has_permission_r_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _s2_new_hit_state_r_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _s2_new_hit_state_r_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] 
_s2_new_hit_state_r_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _s2_new_hit_state_r_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [3:0] _s2_has_permission_r_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _s2_new_hit_state_r_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _dataReadArb_io_in_2_bits_req_0_way_en_T = 4'hF; // @[dcache.scala:491:48] wire [3:0] _s2_has_permission_r_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _s2_new_hit_state_r_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _s2_has_permission_r_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _s2_new_hit_state_r_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _s2_has_permission_r_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _s2_new_hit_state_r_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _s2_has_permission_r_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _s2_new_hit_state_r_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _s2_has_permission_r_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _s2_new_hit_state_r_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [33:0] _metaReadArb_io_in_5_bits_req_0_idx_T = 34'h0; // @[dcache.scala:573:74] wire [39:0] mshr_read_req_0_uop_debug_pc = 40'h0; // @[dcache.scala:517:27] wire [39:0] mshr_read_req_0_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19] wire [39:0] wb_req_0_uop_debug_pc = 40'h0; // @[dcache.scala:532:20] wire [39:0] wb_req_0_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19] wire [39:0] prober_req_0_uop_debug_pc = 40'h0; // @[dcache.scala:553:26] wire [39:0] prober_req_0_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19] wire [39:0] prefetch_req_0_uop_debug_pc = 40'h0; // @[dcache.scala:568:27] wire [39:0] prefetch_req_0_addr = 40'h0; // @[dcache.scala:568:27] wire [5:0] mshr_read_req_0_uop_pc_lob = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_pdst = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_prs1 = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_prs2 = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_prs3 = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_stale_pdst = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_ldst = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_lrs1 = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_lrs2 = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_lrs3 = 6'h0; // @[dcache.scala:517:27] wire [5:0] mshr_read_req_0_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_prs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_prs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_prs3 = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_stale_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_ldst = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] mshr_read_req_0_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_pc_lob = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_pdst = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_prs1 = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_prs2 = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_prs3 = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_stale_pdst = 
6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_ldst = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_lrs1 = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_lrs2 = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_lrs3 = 6'h0; // @[dcache.scala:532:20] wire [5:0] wb_req_0_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_prs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_prs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_prs3 = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_stale_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_ldst = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] wb_req_0_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_pc_lob = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_pdst = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_prs1 = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_prs2 = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_prs3 = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_stale_pdst = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_ldst = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_lrs1 = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_lrs2 = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_lrs3 = 6'h0; // @[dcache.scala:553:26] wire [5:0] prober_req_0_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_prs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_prs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_prs3 = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_stale_pdst = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_ldst = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] prober_req_0_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19] wire [5:0] prefetch_req_0_uop_pc_lob = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_pdst = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_prs1 = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_prs2 = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_prs3 = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_stale_pdst = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_ldst = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_lrs1 = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_lrs2 = 6'h0; // @[dcache.scala:568:27] wire [5:0] prefetch_req_0_uop_lrs3 = 6'h0; // @[dcache.scala:568:27] wire [4:0] mshr_read_req_0_uop_ctrl_op_fcn = 5'h0; // @[dcache.scala:517:27] wire [4:0] mshr_read_req_0_uop_rob_idx = 5'h0; // @[dcache.scala:517:27] wire [4:0] mshr_read_req_0_uop_mem_cmd = 5'h0; // @[dcache.scala:517:27] wire [4:0] mshr_read_req_0_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19] wire [4:0] mshr_read_req_0_uop_uop_rob_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] mshr_read_req_0_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19] wire [4:0] mshr_read_req_0_uop_cs_op_fcn 
= 5'h0; // @[consts.scala:279:18] wire [4:0] wb_req_0_uop_ctrl_op_fcn = 5'h0; // @[dcache.scala:532:20] wire [4:0] wb_req_0_uop_rob_idx = 5'h0; // @[dcache.scala:532:20] wire [4:0] wb_req_0_uop_mem_cmd = 5'h0; // @[dcache.scala:532:20] wire [4:0] wb_req_0_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19] wire [4:0] wb_req_0_uop_uop_rob_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] wb_req_0_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19] wire [4:0] wb_req_0_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18] wire [4:0] prober_req_0_uop_ctrl_op_fcn = 5'h0; // @[dcache.scala:553:26] wire [4:0] prober_req_0_uop_rob_idx = 5'h0; // @[dcache.scala:553:26] wire [4:0] prober_req_0_uop_mem_cmd = 5'h0; // @[dcache.scala:553:26] wire [4:0] prober_req_0_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19] wire [4:0] prober_req_0_uop_uop_rob_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] prober_req_0_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19] wire [4:0] prober_req_0_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18] wire [4:0] prefetch_req_0_uop_ctrl_op_fcn = 5'h0; // @[dcache.scala:568:27] wire [4:0] prefetch_req_0_uop_rob_idx = 5'h0; // @[dcache.scala:568:27] wire [4:0] prefetch_req_0_uop_mem_cmd = 5'h0; // @[dcache.scala:568:27] wire [2:0] mshr_read_req_0_uop_iq_type = 3'h0; // @[dcache.scala:517:27] wire [2:0] mshr_read_req_0_uop_ctrl_op2_sel = 3'h0; // @[dcache.scala:517:27] wire [2:0] mshr_read_req_0_uop_ctrl_imm_sel = 3'h0; // @[dcache.scala:517:27] wire [2:0] mshr_read_req_0_uop_ctrl_csr_cmd = 3'h0; // @[dcache.scala:517:27] wire [2:0] mshr_read_req_0_uop_br_tag = 3'h0; // @[dcache.scala:517:27] wire [2:0] mshr_read_req_0_uop_ldq_idx = 3'h0; // @[dcache.scala:517:27] wire [2:0] mshr_read_req_0_uop_stq_idx = 3'h0; // @[dcache.scala:517:27] wire [2:0] mshr_read_req_0_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19] wire [2:0] mshr_read_req_0_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] mshr_read_req_0_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] mshr_read_req_0_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19] wire [2:0] mshr_read_req_0_uop_uop_br_tag = 3'h0; // @[consts.scala:269:19] wire [2:0] mshr_read_req_0_uop_uop_ldq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] mshr_read_req_0_uop_uop_stq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] mshr_read_req_0_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] mshr_read_req_0_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] mshr_read_req_0_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18] wire [2:0] wb_req_0_uop_iq_type = 3'h0; // @[dcache.scala:532:20] wire [2:0] wb_req_0_uop_ctrl_op2_sel = 3'h0; // @[dcache.scala:532:20] wire [2:0] wb_req_0_uop_ctrl_imm_sel = 3'h0; // @[dcache.scala:532:20] wire [2:0] wb_req_0_uop_ctrl_csr_cmd = 3'h0; // @[dcache.scala:532:20] wire [2:0] wb_req_0_uop_br_tag = 3'h0; // @[dcache.scala:532:20] wire [2:0] wb_req_0_uop_ldq_idx = 3'h0; // @[dcache.scala:532:20] wire [2:0] wb_req_0_uop_stq_idx = 3'h0; // @[dcache.scala:532:20] wire [2:0] wb_req_0_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19] wire [2:0] wb_req_0_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] wb_req_0_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] wb_req_0_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19] wire [2:0] wb_req_0_uop_uop_br_tag = 3'h0; // @[consts.scala:269:19] wire [2:0] wb_req_0_uop_uop_ldq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] wb_req_0_uop_uop_stq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] 
wb_req_0_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] wb_req_0_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] wb_req_0_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18] wire [2:0] prober_req_0_uop_iq_type = 3'h0; // @[dcache.scala:553:26] wire [2:0] prober_req_0_uop_ctrl_op2_sel = 3'h0; // @[dcache.scala:553:26] wire [2:0] prober_req_0_uop_ctrl_imm_sel = 3'h0; // @[dcache.scala:553:26] wire [2:0] prober_req_0_uop_ctrl_csr_cmd = 3'h0; // @[dcache.scala:553:26] wire [2:0] prober_req_0_uop_br_tag = 3'h0; // @[dcache.scala:553:26] wire [2:0] prober_req_0_uop_ldq_idx = 3'h0; // @[dcache.scala:553:26] wire [2:0] prober_req_0_uop_stq_idx = 3'h0; // @[dcache.scala:553:26] wire [2:0] prober_req_0_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19] wire [2:0] prober_req_0_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] prober_req_0_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] prober_req_0_uop_uop_ctrl_csr_cmd = 3'h0; // @[consts.scala:269:19] wire [2:0] prober_req_0_uop_uop_br_tag = 3'h0; // @[consts.scala:269:19] wire [2:0] prober_req_0_uop_uop_ldq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] prober_req_0_uop_uop_stq_idx = 3'h0; // @[consts.scala:269:19] wire [2:0] prober_req_0_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] prober_req_0_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] prober_req_0_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18] wire [2:0] prefetch_req_0_uop_iq_type = 3'h0; // @[dcache.scala:568:27] wire [2:0] prefetch_req_0_uop_ctrl_op2_sel = 3'h0; // @[dcache.scala:568:27] wire [2:0] prefetch_req_0_uop_ctrl_imm_sel = 3'h0; // @[dcache.scala:568:27] wire [2:0] prefetch_req_0_uop_ctrl_csr_cmd = 3'h0; // @[dcache.scala:568:27] wire [2:0] prefetch_req_0_uop_br_tag = 3'h0; // @[dcache.scala:568:27] wire [2:0] prefetch_req_0_uop_ldq_idx = 3'h0; // @[dcache.scala:568:27] wire [2:0] prefetch_req_0_uop_stq_idx = 3'h0; // @[dcache.scala:568:27] wire [11:0] mshr_read_req_0_uop_csr_addr = 12'h0; // @[dcache.scala:517:27] wire [11:0] mshr_read_req_0_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19] wire [11:0] wb_req_0_uop_csr_addr = 12'h0; // @[dcache.scala:532:20] wire [11:0] wb_req_0_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19] wire [11:0] prober_req_0_uop_csr_addr = 12'h0; // @[dcache.scala:553:26] wire [11:0] prober_req_0_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19] wire [11:0] prefetch_req_0_uop_csr_addr = 12'h0; // @[dcache.scala:568:27] wire [19:0] mshr_read_req_0_uop_imm_packed = 20'h0; // @[dcache.scala:517:27] wire [19:0] mshr_read_req_0_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19] wire [19:0] wb_req_0_uop_imm_packed = 20'h0; // @[dcache.scala:532:20] wire [19:0] wb_req_0_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19] wire [19:0] prober_req_0_uop_imm_packed = 20'h0; // @[dcache.scala:553:26] wire [19:0] prober_req_0_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19] wire [19:0] prefetch_req_0_uop_imm_packed = 20'h0; // @[dcache.scala:568:27] wire [7:0] mshr_read_req_0_uop_br_mask = 8'h0; // @[dcache.scala:517:27] wire [7:0] mshr_read_req_0_uop_uop_br_mask = 8'h0; // @[consts.scala:269:19] wire [7:0] wb_req_0_uop_br_mask = 8'h0; // @[dcache.scala:532:20] wire [7:0] wb_req_0_uop_uop_br_mask = 8'h0; // @[consts.scala:269:19] wire [7:0] prober_req_0_uop_br_mask = 8'h0; // @[dcache.scala:553:26] wire [7:0] prober_req_0_uop_uop_br_mask = 8'h0; // @[consts.scala:269:19] wire [7:0] prefetch_req_0_uop_br_mask = 8'h0; // @[dcache.scala:568:27] wire [9:0] 
mshr_read_req_0_uop_fu_code = 10'h0; // @[dcache.scala:517:27] wire [9:0] mshr_read_req_0_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19] wire [9:0] wb_req_0_uop_fu_code = 10'h0; // @[dcache.scala:532:20] wire [9:0] wb_req_0_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19] wire [9:0] prober_req_0_uop_fu_code = 10'h0; // @[dcache.scala:553:26] wire [9:0] prober_req_0_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19] wire [9:0] prefetch_req_0_uop_fu_code = 10'h0; // @[dcache.scala:568:27] wire [31:0] mshr_read_req_0_uop_inst = 32'h0; // @[dcache.scala:517:27] wire [31:0] mshr_read_req_0_uop_debug_inst = 32'h0; // @[dcache.scala:517:27] wire [31:0] mshr_read_req_0_uop_uop_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] mshr_read_req_0_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] wb_req_0_uop_inst = 32'h0; // @[dcache.scala:532:20] wire [31:0] wb_req_0_uop_debug_inst = 32'h0; // @[dcache.scala:532:20] wire [31:0] wb_req_0_uop_uop_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] wb_req_0_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] prober_req_0_uop_inst = 32'h0; // @[dcache.scala:553:26] wire [31:0] prober_req_0_uop_debug_inst = 32'h0; // @[dcache.scala:553:26] wire [31:0] prober_req_0_uop_uop_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] prober_req_0_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] prefetch_req_0_uop_inst = 32'h0; // @[dcache.scala:568:27] wire [31:0] prefetch_req_0_uop_debug_inst = 32'h0; // @[dcache.scala:568:27] wire [1:0] _s2_has_permission_r_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _s2_has_permission_r_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _s2_has_permission_r_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _s2_has_permission_r_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _s2_new_hit_state_r_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _s2_new_hit_state_r_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _s2_new_hit_state_r_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _s2_new_hit_state_r_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _dataWriteArb_io_in_0_bits_wmask_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _s2_has_permission_r_T_11 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _s2_has_permission_r_T_13 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _s2_has_permission_r_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _s2_has_permission_r_T_23 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _s2_new_hit_state_r_T_11 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _s2_new_hit_state_r_T_13 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _s2_new_hit_state_r_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _s2_new_hit_state_r_T_23 = 2'h3; // @[Metadata.scala:24:15] wire nodeOut_a_ready = auto_out_a_ready_0; // @[MixedNode.scala:542:17] wire nodeOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_a_bits_size; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire nodeOut_b_ready; // @[MixedNode.scala:542:17] wire nodeOut_b_valid = auto_out_b_valid_0; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_b_bits_opcode = auto_out_b_bits_opcode_0; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_b_bits_param 
= auto_out_b_bits_param_0; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_b_bits_size = auto_out_b_bits_size_0; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_b_bits_source = auto_out_b_bits_source_0; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_b_bits_address = auto_out_b_bits_address_0; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_b_bits_mask = auto_out_b_bits_mask_0; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_b_bits_data = auto_out_b_bits_data_0; // @[MixedNode.scala:542:17] wire nodeOut_b_bits_corrupt = auto_out_b_bits_corrupt_0; // @[MixedNode.scala:542:17] wire nodeOut_c_ready = auto_out_c_ready_0; // @[MixedNode.scala:542:17] wire nodeOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_c_bits_size; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_c_bits_data; // @[MixedNode.scala:542:17] wire nodeOut_d_ready; // @[MixedNode.scala:542:17] wire nodeOut_d_valid = auto_out_d_valid_0; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_d_bits_opcode = auto_out_d_bits_opcode_0; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_d_bits_param = auto_out_d_bits_param_0; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_d_bits_size = auto_out_d_bits_size_0; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_d_bits_source = auto_out_d_bits_source_0; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_d_bits_sink = auto_out_d_bits_sink_0; // @[MixedNode.scala:542:17] wire nodeOut_d_bits_denied = auto_out_d_bits_denied_0; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_d_bits_data = auto_out_d_bits_data_0; // @[MixedNode.scala:542:17] wire nodeOut_d_bits_corrupt = auto_out_d_bits_corrupt_0; // @[MixedNode.scala:542:17] wire nodeOut_e_ready = auto_out_e_ready_0; // @[MixedNode.scala:542:17] wire nodeOut_e_valid; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_e_bits_sink; // @[MixedNode.scala:542:17] wire _io_lsu_req_ready_T; // @[dcache.scala:480:50] wire _s0_valid_WIRE_0 = io_lsu_req_bits_0_valid_0; // @[dcache.scala:413:7, :579:46] wire [6:0] _s0_req_WIRE_0_uop_uopc = io_lsu_req_bits_0_bits_uop_uopc_0; // @[dcache.scala:413:7, :582:54] wire [31:0] _s0_req_WIRE_0_uop_inst = io_lsu_req_bits_0_bits_uop_inst_0; // @[dcache.scala:413:7, :582:54] wire [31:0] _s0_req_WIRE_0_uop_debug_inst = io_lsu_req_bits_0_bits_uop_debug_inst_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_rvc = io_lsu_req_bits_0_bits_uop_is_rvc_0; // @[dcache.scala:413:7, :582:54] wire [39:0] _s0_req_WIRE_0_uop_debug_pc = io_lsu_req_bits_0_bits_uop_debug_pc_0; // @[dcache.scala:413:7, :582:54] wire [2:0] _s0_req_WIRE_0_uop_iq_type = io_lsu_req_bits_0_bits_uop_iq_type_0; // @[dcache.scala:413:7, :582:54] wire [9:0] _s0_req_WIRE_0_uop_fu_code = io_lsu_req_bits_0_bits_uop_fu_code_0; // @[dcache.scala:413:7, :582:54] wire [3:0] _s0_req_WIRE_0_uop_ctrl_br_type = io_lsu_req_bits_0_bits_uop_ctrl_br_type_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_ctrl_op1_sel = io_lsu_req_bits_0_bits_uop_ctrl_op1_sel_0; // @[dcache.scala:413:7, :582:54] wire [2:0] _s0_req_WIRE_0_uop_ctrl_op2_sel = io_lsu_req_bits_0_bits_uop_ctrl_op2_sel_0; // @[dcache.scala:413:7, :582:54] wire [2:0] _s0_req_WIRE_0_uop_ctrl_imm_sel = io_lsu_req_bits_0_bits_uop_ctrl_imm_sel_0; // @[dcache.scala:413:7, :582:54] wire [4:0] _s0_req_WIRE_0_uop_ctrl_op_fcn = 
io_lsu_req_bits_0_bits_uop_ctrl_op_fcn_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_ctrl_fcn_dw = io_lsu_req_bits_0_bits_uop_ctrl_fcn_dw_0; // @[dcache.scala:413:7, :582:54] wire [2:0] _s0_req_WIRE_0_uop_ctrl_csr_cmd = io_lsu_req_bits_0_bits_uop_ctrl_csr_cmd_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_ctrl_is_load = io_lsu_req_bits_0_bits_uop_ctrl_is_load_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_ctrl_is_sta = io_lsu_req_bits_0_bits_uop_ctrl_is_sta_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_ctrl_is_std = io_lsu_req_bits_0_bits_uop_ctrl_is_std_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_iw_state = io_lsu_req_bits_0_bits_uop_iw_state_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_iw_p1_poisoned = io_lsu_req_bits_0_bits_uop_iw_p1_poisoned_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_iw_p2_poisoned = io_lsu_req_bits_0_bits_uop_iw_p2_poisoned_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_br = io_lsu_req_bits_0_bits_uop_is_br_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_jalr = io_lsu_req_bits_0_bits_uop_is_jalr_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_jal = io_lsu_req_bits_0_bits_uop_is_jal_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_sfb = io_lsu_req_bits_0_bits_uop_is_sfb_0; // @[dcache.scala:413:7, :582:54] wire [7:0] _s0_req_WIRE_0_uop_br_mask = io_lsu_req_bits_0_bits_uop_br_mask_0; // @[dcache.scala:413:7, :582:54] wire [2:0] _s0_req_WIRE_0_uop_br_tag = io_lsu_req_bits_0_bits_uop_br_tag_0; // @[dcache.scala:413:7, :582:54] wire [3:0] _s0_req_WIRE_0_uop_ftq_idx = io_lsu_req_bits_0_bits_uop_ftq_idx_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_edge_inst = io_lsu_req_bits_0_bits_uop_edge_inst_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_pc_lob = io_lsu_req_bits_0_bits_uop_pc_lob_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_taken = io_lsu_req_bits_0_bits_uop_taken_0; // @[dcache.scala:413:7, :582:54] wire [19:0] _s0_req_WIRE_0_uop_imm_packed = io_lsu_req_bits_0_bits_uop_imm_packed_0; // @[dcache.scala:413:7, :582:54] wire [11:0] _s0_req_WIRE_0_uop_csr_addr = io_lsu_req_bits_0_bits_uop_csr_addr_0; // @[dcache.scala:413:7, :582:54] wire [4:0] _s0_req_WIRE_0_uop_rob_idx = io_lsu_req_bits_0_bits_uop_rob_idx_0; // @[dcache.scala:413:7, :582:54] wire [2:0] _s0_req_WIRE_0_uop_ldq_idx = io_lsu_req_bits_0_bits_uop_ldq_idx_0; // @[dcache.scala:413:7, :582:54] wire [2:0] _s0_req_WIRE_0_uop_stq_idx = io_lsu_req_bits_0_bits_uop_stq_idx_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_rxq_idx = io_lsu_req_bits_0_bits_uop_rxq_idx_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_pdst = io_lsu_req_bits_0_bits_uop_pdst_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_prs1 = io_lsu_req_bits_0_bits_uop_prs1_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_prs2 = io_lsu_req_bits_0_bits_uop_prs2_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_prs3 = io_lsu_req_bits_0_bits_uop_prs3_0; // @[dcache.scala:413:7, :582:54] wire [3:0] _s0_req_WIRE_0_uop_ppred = io_lsu_req_bits_0_bits_uop_ppred_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_prs1_busy = io_lsu_req_bits_0_bits_uop_prs1_busy_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_prs2_busy = io_lsu_req_bits_0_bits_uop_prs2_busy_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_prs3_busy = 
io_lsu_req_bits_0_bits_uop_prs3_busy_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_ppred_busy = io_lsu_req_bits_0_bits_uop_ppred_busy_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_stale_pdst = io_lsu_req_bits_0_bits_uop_stale_pdst_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_exception = io_lsu_req_bits_0_bits_uop_exception_0; // @[dcache.scala:413:7, :582:54] wire [63:0] _s0_req_WIRE_0_uop_exc_cause = io_lsu_req_bits_0_bits_uop_exc_cause_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_bypassable = io_lsu_req_bits_0_bits_uop_bypassable_0; // @[dcache.scala:413:7, :582:54] wire [4:0] _s0_req_WIRE_0_uop_mem_cmd = io_lsu_req_bits_0_bits_uop_mem_cmd_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_mem_size = io_lsu_req_bits_0_bits_uop_mem_size_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_mem_signed = io_lsu_req_bits_0_bits_uop_mem_signed_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_fence = io_lsu_req_bits_0_bits_uop_is_fence_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_fencei = io_lsu_req_bits_0_bits_uop_is_fencei_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_amo = io_lsu_req_bits_0_bits_uop_is_amo_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_uses_ldq = io_lsu_req_bits_0_bits_uop_uses_ldq_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_uses_stq = io_lsu_req_bits_0_bits_uop_uses_stq_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_sys_pc2epc = io_lsu_req_bits_0_bits_uop_is_sys_pc2epc_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_is_unique = io_lsu_req_bits_0_bits_uop_is_unique_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_flush_on_commit = io_lsu_req_bits_0_bits_uop_flush_on_commit_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_ldst_is_rs1 = io_lsu_req_bits_0_bits_uop_ldst_is_rs1_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_ldst = io_lsu_req_bits_0_bits_uop_ldst_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_lrs1 = io_lsu_req_bits_0_bits_uop_lrs1_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_lrs2 = io_lsu_req_bits_0_bits_uop_lrs2_0; // @[dcache.scala:413:7, :582:54] wire [5:0] _s0_req_WIRE_0_uop_lrs3 = io_lsu_req_bits_0_bits_uop_lrs3_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_ldst_val = io_lsu_req_bits_0_bits_uop_ldst_val_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_dst_rtype = io_lsu_req_bits_0_bits_uop_dst_rtype_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_lrs1_rtype = io_lsu_req_bits_0_bits_uop_lrs1_rtype_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_lrs2_rtype = io_lsu_req_bits_0_bits_uop_lrs2_rtype_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_frs3_en = io_lsu_req_bits_0_bits_uop_frs3_en_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_fp_val = io_lsu_req_bits_0_bits_uop_fp_val_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_fp_single = io_lsu_req_bits_0_bits_uop_fp_single_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_xcpt_pf_if = io_lsu_req_bits_0_bits_uop_xcpt_pf_if_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_xcpt_ae_if = io_lsu_req_bits_0_bits_uop_xcpt_ae_if_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_xcpt_ma_if = io_lsu_req_bits_0_bits_uop_xcpt_ma_if_0; // @[dcache.scala:413:7, :582:54] wire 
_s0_req_WIRE_0_uop_bp_debug_if = io_lsu_req_bits_0_bits_uop_bp_debug_if_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_uop_bp_xcpt_if = io_lsu_req_bits_0_bits_uop_bp_xcpt_if_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_debug_fsrc = io_lsu_req_bits_0_bits_uop_debug_fsrc_0; // @[dcache.scala:413:7, :582:54] wire [1:0] _s0_req_WIRE_0_uop_debug_tsrc = io_lsu_req_bits_0_bits_uop_debug_tsrc_0; // @[dcache.scala:413:7, :582:54] wire [39:0] _s0_req_WIRE_0_addr = io_lsu_req_bits_0_bits_addr_0; // @[dcache.scala:413:7, :582:54] wire [63:0] _s0_req_WIRE_0_data = io_lsu_req_bits_0_bits_data_0; // @[dcache.scala:413:7, :582:54] wire _s0_req_WIRE_0_is_hella = io_lsu_req_bits_0_bits_is_hella_0; // @[dcache.scala:413:7, :582:54] wire _io_lsu_resp_0_valid_T_6; // @[dcache.scala:858:78] wire [6:0] io_lsu_resp_0_bits_out_uop_uopc; // @[util.scala:101:23] wire [31:0] io_lsu_resp_0_bits_out_uop_inst; // @[util.scala:101:23] wire [31:0] io_lsu_resp_0_bits_out_uop_debug_inst; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_rvc; // @[util.scala:101:23] wire [39:0] io_lsu_resp_0_bits_out_uop_debug_pc; // @[util.scala:101:23] wire [2:0] io_lsu_resp_0_bits_out_uop_iq_type; // @[util.scala:101:23] wire [9:0] io_lsu_resp_0_bits_out_uop_fu_code; // @[util.scala:101:23] wire [3:0] io_lsu_resp_0_bits_out_uop_ctrl_br_type; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_ctrl_op1_sel; // @[util.scala:101:23] wire [2:0] io_lsu_resp_0_bits_out_uop_ctrl_op2_sel; // @[util.scala:101:23] wire [2:0] io_lsu_resp_0_bits_out_uop_ctrl_imm_sel; // @[util.scala:101:23] wire [4:0] io_lsu_resp_0_bits_out_uop_ctrl_op_fcn; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_ctrl_fcn_dw; // @[util.scala:101:23] wire [2:0] io_lsu_resp_0_bits_out_uop_ctrl_csr_cmd; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_ctrl_is_load; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_ctrl_is_sta; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_ctrl_is_std; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_iw_state; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_iw_p1_poisoned; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_iw_p2_poisoned; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_br; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_jalr; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_jal; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_sfb; // @[util.scala:101:23] wire [7:0] io_lsu_resp_0_bits_out_uop_br_mask; // @[util.scala:101:23] wire [2:0] io_lsu_resp_0_bits_out_uop_br_tag; // @[util.scala:101:23] wire [3:0] io_lsu_resp_0_bits_out_uop_ftq_idx; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_edge_inst; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_pc_lob; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_taken; // @[util.scala:101:23] wire [19:0] io_lsu_resp_0_bits_out_uop_imm_packed; // @[util.scala:101:23] wire [11:0] io_lsu_resp_0_bits_out_uop_csr_addr; // @[util.scala:101:23] wire [4:0] io_lsu_resp_0_bits_out_uop_rob_idx; // @[util.scala:101:23] wire [2:0] io_lsu_resp_0_bits_out_uop_ldq_idx; // @[util.scala:101:23] wire [2:0] io_lsu_resp_0_bits_out_uop_stq_idx; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_rxq_idx; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_pdst; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_prs1; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_prs2; // @[util.scala:101:23] 
wire [5:0] io_lsu_resp_0_bits_out_uop_prs3; // @[util.scala:101:23] wire [3:0] io_lsu_resp_0_bits_out_uop_ppred; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_prs1_busy; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_prs2_busy; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_prs3_busy; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_ppred_busy; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_stale_pdst; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_exception; // @[util.scala:101:23] wire [63:0] io_lsu_resp_0_bits_out_uop_exc_cause; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_bypassable; // @[util.scala:101:23] wire [4:0] io_lsu_resp_0_bits_out_uop_mem_cmd; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_mem_size; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_mem_signed; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_fence; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_fencei; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_amo; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_uses_ldq; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_uses_stq; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_sys_pc2epc; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_is_unique; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_flush_on_commit; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_ldst_is_rs1; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_ldst; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_lrs1; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_lrs2; // @[util.scala:101:23] wire [5:0] io_lsu_resp_0_bits_out_uop_lrs3; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_ldst_val; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_dst_rtype; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_lrs1_rtype; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_lrs2_rtype; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_frs3_en; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_fp_val; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_fp_single; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_xcpt_pf_if; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_xcpt_ae_if; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_xcpt_ma_if; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_bp_debug_if; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_uop_bp_xcpt_if; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_debug_fsrc; // @[util.scala:101:23] wire [1:0] io_lsu_resp_0_bits_out_uop_debug_tsrc; // @[util.scala:101:23] wire [63:0] io_lsu_resp_0_bits_out_data; // @[util.scala:101:23] wire io_lsu_resp_0_bits_out_is_hella; // @[util.scala:101:23] wire _io_lsu_nack_0_valid_T_7; // @[dcache.scala:863:75] wire [6:0] io_lsu_nack_0_bits_out_uop_uopc; // @[util.scala:101:23] wire [31:0] io_lsu_nack_0_bits_out_uop_inst; // @[util.scala:101:23] wire [31:0] io_lsu_nack_0_bits_out_uop_debug_inst; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_rvc; // @[util.scala:101:23] wire [39:0] io_lsu_nack_0_bits_out_uop_debug_pc; // @[util.scala:101:23] wire [2:0] io_lsu_nack_0_bits_out_uop_iq_type; // @[util.scala:101:23] wire [9:0] io_lsu_nack_0_bits_out_uop_fu_code; // @[util.scala:101:23] wire [3:0] io_lsu_nack_0_bits_out_uop_ctrl_br_type; // @[util.scala:101:23] wire [1:0] 
io_lsu_nack_0_bits_out_uop_ctrl_op1_sel; // @[util.scala:101:23] wire [2:0] io_lsu_nack_0_bits_out_uop_ctrl_op2_sel; // @[util.scala:101:23] wire [2:0] io_lsu_nack_0_bits_out_uop_ctrl_imm_sel; // @[util.scala:101:23] wire [4:0] io_lsu_nack_0_bits_out_uop_ctrl_op_fcn; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_ctrl_fcn_dw; // @[util.scala:101:23] wire [2:0] io_lsu_nack_0_bits_out_uop_ctrl_csr_cmd; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_ctrl_is_load; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_ctrl_is_sta; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_ctrl_is_std; // @[util.scala:101:23] wire [1:0] io_lsu_nack_0_bits_out_uop_iw_state; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_iw_p1_poisoned; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_iw_p2_poisoned; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_br; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_jalr; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_jal; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_sfb; // @[util.scala:101:23] wire [7:0] io_lsu_nack_0_bits_out_uop_br_mask; // @[util.scala:101:23] wire [2:0] io_lsu_nack_0_bits_out_uop_br_tag; // @[util.scala:101:23] wire [3:0] io_lsu_nack_0_bits_out_uop_ftq_idx; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_edge_inst; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_pc_lob; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_taken; // @[util.scala:101:23] wire [19:0] io_lsu_nack_0_bits_out_uop_imm_packed; // @[util.scala:101:23] wire [11:0] io_lsu_nack_0_bits_out_uop_csr_addr; // @[util.scala:101:23] wire [4:0] io_lsu_nack_0_bits_out_uop_rob_idx; // @[util.scala:101:23] wire [2:0] io_lsu_nack_0_bits_out_uop_ldq_idx; // @[util.scala:101:23] wire [2:0] io_lsu_nack_0_bits_out_uop_stq_idx; // @[util.scala:101:23] wire [1:0] io_lsu_nack_0_bits_out_uop_rxq_idx; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_pdst; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_prs1; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_prs2; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_prs3; // @[util.scala:101:23] wire [3:0] io_lsu_nack_0_bits_out_uop_ppred; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_prs1_busy; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_prs2_busy; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_prs3_busy; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_ppred_busy; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_stale_pdst; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_exception; // @[util.scala:101:23] wire [63:0] io_lsu_nack_0_bits_out_uop_exc_cause; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_bypassable; // @[util.scala:101:23] wire [4:0] io_lsu_nack_0_bits_out_uop_mem_cmd; // @[util.scala:101:23] wire [1:0] io_lsu_nack_0_bits_out_uop_mem_size; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_mem_signed; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_fence; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_fencei; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_amo; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_uses_ldq; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_uses_stq; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_sys_pc2epc; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_is_unique; // @[util.scala:101:23] wire 
io_lsu_nack_0_bits_out_uop_flush_on_commit; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_ldst_is_rs1; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_ldst; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_lrs1; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_lrs2; // @[util.scala:101:23] wire [5:0] io_lsu_nack_0_bits_out_uop_lrs3; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_ldst_val; // @[util.scala:101:23] wire [1:0] io_lsu_nack_0_bits_out_uop_dst_rtype; // @[util.scala:101:23] wire [1:0] io_lsu_nack_0_bits_out_uop_lrs1_rtype; // @[util.scala:101:23] wire [1:0] io_lsu_nack_0_bits_out_uop_lrs2_rtype; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_frs3_en; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_fp_val; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_fp_single; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_xcpt_pf_if; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_xcpt_ae_if; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_xcpt_ma_if; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_bp_debug_if; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_uop_bp_xcpt_if; // @[util.scala:101:23] wire [1:0] io_lsu_nack_0_bits_out_uop_debug_fsrc; // @[util.scala:101:23] wire [1:0] io_lsu_nack_0_bits_out_uop_debug_tsrc; // @[util.scala:101:23] wire [39:0] io_lsu_nack_0_bits_out_addr; // @[util.scala:101:23] wire [63:0] io_lsu_nack_0_bits_out_data; // @[util.scala:101:23] wire io_lsu_nack_0_bits_out_is_hella; // @[util.scala:101:23] wire _io_lsu_ordered_T_3; // @[dcache.scala:913:66] wire io_lsu_perf_acquire_done; // @[Edges.scala:233:22] wire io_lsu_perf_release_done; // @[Edges.scala:233:22] wire [2:0] auto_out_a_bits_opcode_0; // @[dcache.scala:413:7] wire [2:0] auto_out_a_bits_param_0; // @[dcache.scala:413:7] wire [3:0] auto_out_a_bits_size_0; // @[dcache.scala:413:7] wire [1:0] auto_out_a_bits_source_0; // @[dcache.scala:413:7] wire [31:0] auto_out_a_bits_address_0; // @[dcache.scala:413:7] wire [7:0] auto_out_a_bits_mask_0; // @[dcache.scala:413:7] wire [63:0] auto_out_a_bits_data_0; // @[dcache.scala:413:7] wire auto_out_a_valid_0; // @[dcache.scala:413:7] wire auto_out_b_ready_0; // @[dcache.scala:413:7] wire [2:0] auto_out_c_bits_opcode_0; // @[dcache.scala:413:7] wire [2:0] auto_out_c_bits_param_0; // @[dcache.scala:413:7] wire [3:0] auto_out_c_bits_size_0; // @[dcache.scala:413:7] wire [1:0] auto_out_c_bits_source_0; // @[dcache.scala:413:7] wire [31:0] auto_out_c_bits_address_0; // @[dcache.scala:413:7] wire [63:0] auto_out_c_bits_data_0; // @[dcache.scala:413:7] wire auto_out_c_valid_0; // @[dcache.scala:413:7] wire auto_out_d_ready_0; // @[dcache.scala:413:7] wire [2:0] auto_out_e_bits_sink_0; // @[dcache.scala:413:7] wire auto_out_e_valid_0; // @[dcache.scala:413:7] wire io_lsu_req_ready_0; // @[dcache.scala:413:7] wire [3:0] io_lsu_resp_0_bits_uop_ctrl_br_type_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_ctrl_op1_sel_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_resp_0_bits_uop_ctrl_op2_sel_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_resp_0_bits_uop_ctrl_imm_sel_0; // @[dcache.scala:413:7] wire [4:0] io_lsu_resp_0_bits_uop_ctrl_op_fcn_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_ctrl_fcn_dw_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_resp_0_bits_uop_ctrl_csr_cmd_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_ctrl_is_load_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_ctrl_is_sta_0; // @[dcache.scala:413:7] 
wire io_lsu_resp_0_bits_uop_ctrl_is_std_0; // @[dcache.scala:413:7] wire [6:0] io_lsu_resp_0_bits_uop_uopc_0; // @[dcache.scala:413:7] wire [31:0] io_lsu_resp_0_bits_uop_inst_0; // @[dcache.scala:413:7] wire [31:0] io_lsu_resp_0_bits_uop_debug_inst_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_rvc_0; // @[dcache.scala:413:7] wire [39:0] io_lsu_resp_0_bits_uop_debug_pc_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_resp_0_bits_uop_iq_type_0; // @[dcache.scala:413:7] wire [9:0] io_lsu_resp_0_bits_uop_fu_code_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_iw_state_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_iw_p1_poisoned_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_iw_p2_poisoned_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_br_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_jalr_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_jal_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_sfb_0; // @[dcache.scala:413:7] wire [7:0] io_lsu_resp_0_bits_uop_br_mask_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_resp_0_bits_uop_br_tag_0; // @[dcache.scala:413:7] wire [3:0] io_lsu_resp_0_bits_uop_ftq_idx_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_edge_inst_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_pc_lob_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_taken_0; // @[dcache.scala:413:7] wire [19:0] io_lsu_resp_0_bits_uop_imm_packed_0; // @[dcache.scala:413:7] wire [11:0] io_lsu_resp_0_bits_uop_csr_addr_0; // @[dcache.scala:413:7] wire [4:0] io_lsu_resp_0_bits_uop_rob_idx_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_resp_0_bits_uop_ldq_idx_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_resp_0_bits_uop_stq_idx_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_rxq_idx_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_pdst_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_prs1_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_prs2_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_prs3_0; // @[dcache.scala:413:7] wire [3:0] io_lsu_resp_0_bits_uop_ppred_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_prs1_busy_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_prs2_busy_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_prs3_busy_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_ppred_busy_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_stale_pdst_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_exception_0; // @[dcache.scala:413:7] wire [63:0] io_lsu_resp_0_bits_uop_exc_cause_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_bypassable_0; // @[dcache.scala:413:7] wire [4:0] io_lsu_resp_0_bits_uop_mem_cmd_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_mem_size_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_mem_signed_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_fence_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_fencei_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_amo_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_uses_ldq_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_uses_stq_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_sys_pc2epc_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_is_unique_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_flush_on_commit_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_ldst_is_rs1_0; // @[dcache.scala:413:7] wire [5:0] 
io_lsu_resp_0_bits_uop_ldst_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_lrs1_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_lrs2_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_resp_0_bits_uop_lrs3_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_ldst_val_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_dst_rtype_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_lrs1_rtype_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_lrs2_rtype_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_frs3_en_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_fp_val_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_fp_single_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_xcpt_pf_if_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_xcpt_ae_if_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_xcpt_ma_if_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_bp_debug_if_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_uop_bp_xcpt_if_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_debug_fsrc_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_resp_0_bits_uop_debug_tsrc_0; // @[dcache.scala:413:7] wire [63:0] io_lsu_resp_0_bits_data_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_bits_is_hella_0; // @[dcache.scala:413:7] wire io_lsu_resp_0_valid_0; // @[dcache.scala:413:7] wire [3:0] io_lsu_nack_0_bits_uop_ctrl_br_type_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_ctrl_op1_sel_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_nack_0_bits_uop_ctrl_op2_sel_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_nack_0_bits_uop_ctrl_imm_sel_0; // @[dcache.scala:413:7] wire [4:0] io_lsu_nack_0_bits_uop_ctrl_op_fcn_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_ctrl_fcn_dw_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_nack_0_bits_uop_ctrl_csr_cmd_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_ctrl_is_load_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_ctrl_is_sta_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_ctrl_is_std_0; // @[dcache.scala:413:7] wire [6:0] io_lsu_nack_0_bits_uop_uopc_0; // @[dcache.scala:413:7] wire [31:0] io_lsu_nack_0_bits_uop_inst_0; // @[dcache.scala:413:7] wire [31:0] io_lsu_nack_0_bits_uop_debug_inst_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_rvc_0; // @[dcache.scala:413:7] wire [39:0] io_lsu_nack_0_bits_uop_debug_pc_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_nack_0_bits_uop_iq_type_0; // @[dcache.scala:413:7] wire [9:0] io_lsu_nack_0_bits_uop_fu_code_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_iw_state_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_iw_p1_poisoned_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_iw_p2_poisoned_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_br_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_jalr_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_jal_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_sfb_0; // @[dcache.scala:413:7] wire [7:0] io_lsu_nack_0_bits_uop_br_mask_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_nack_0_bits_uop_br_tag_0; // @[dcache.scala:413:7] wire [3:0] io_lsu_nack_0_bits_uop_ftq_idx_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_edge_inst_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_pc_lob_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_taken_0; // @[dcache.scala:413:7] wire [19:0] io_lsu_nack_0_bits_uop_imm_packed_0; // @[dcache.scala:413:7] wire 
[11:0] io_lsu_nack_0_bits_uop_csr_addr_0; // @[dcache.scala:413:7] wire [4:0] io_lsu_nack_0_bits_uop_rob_idx_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_nack_0_bits_uop_ldq_idx_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_nack_0_bits_uop_stq_idx_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_rxq_idx_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_pdst_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_prs1_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_prs2_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_prs3_0; // @[dcache.scala:413:7] wire [3:0] io_lsu_nack_0_bits_uop_ppred_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_prs1_busy_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_prs2_busy_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_prs3_busy_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_ppred_busy_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_stale_pdst_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_exception_0; // @[dcache.scala:413:7] wire [63:0] io_lsu_nack_0_bits_uop_exc_cause_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_bypassable_0; // @[dcache.scala:413:7] wire [4:0] io_lsu_nack_0_bits_uop_mem_cmd_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_mem_size_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_mem_signed_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_fence_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_fencei_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_amo_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_uses_ldq_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_uses_stq_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_sys_pc2epc_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_is_unique_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_flush_on_commit_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_ldst_is_rs1_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_ldst_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_lrs1_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_lrs2_0; // @[dcache.scala:413:7] wire [5:0] io_lsu_nack_0_bits_uop_lrs3_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_ldst_val_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_dst_rtype_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_lrs1_rtype_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_lrs2_rtype_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_frs3_en_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_fp_val_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_fp_single_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_xcpt_pf_if_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_xcpt_ae_if_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_xcpt_ma_if_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_bp_debug_if_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_uop_bp_xcpt_if_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_debug_fsrc_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_nack_0_bits_uop_debug_tsrc_0; // @[dcache.scala:413:7] wire [39:0] io_lsu_nack_0_bits_addr_0; // @[dcache.scala:413:7] wire [63:0] io_lsu_nack_0_bits_data_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_bits_is_hella_0; // @[dcache.scala:413:7] wire io_lsu_nack_0_valid_0; // @[dcache.scala:413:7] wire [2:0] io_lsu_release_bits_opcode_0; // 
@[dcache.scala:413:7] wire [2:0] io_lsu_release_bits_param_0; // @[dcache.scala:413:7] wire [3:0] io_lsu_release_bits_size_0; // @[dcache.scala:413:7] wire [1:0] io_lsu_release_bits_source_0; // @[dcache.scala:413:7] wire [31:0] io_lsu_release_bits_address_0; // @[dcache.scala:413:7] wire [63:0] io_lsu_release_bits_data_0; // @[dcache.scala:413:7] wire io_lsu_release_valid_0; // @[dcache.scala:413:7] wire io_lsu_perf_acquire_0; // @[dcache.scala:413:7] wire io_lsu_perf_release_0; // @[dcache.scala:413:7] wire io_lsu_ordered_0; // @[dcache.scala:413:7] assign auto_out_a_valid_0 = nodeOut_a_valid; // @[MixedNode.scala:542:17] assign auto_out_a_bits_opcode_0 = nodeOut_a_bits_opcode; // @[MixedNode.scala:542:17] assign auto_out_a_bits_param_0 = nodeOut_a_bits_param; // @[MixedNode.scala:542:17] assign auto_out_a_bits_size_0 = nodeOut_a_bits_size; // @[MixedNode.scala:542:17] assign auto_out_a_bits_source_0 = nodeOut_a_bits_source; // @[MixedNode.scala:542:17] assign auto_out_a_bits_address_0 = nodeOut_a_bits_address; // @[MixedNode.scala:542:17] assign auto_out_a_bits_mask_0 = nodeOut_a_bits_mask; // @[MixedNode.scala:542:17] assign auto_out_a_bits_data_0 = nodeOut_a_bits_data; // @[MixedNode.scala:542:17] wire _nodeOut_b_ready_T_1; // @[dcache.scala:779:48] assign auto_out_b_ready_0 = nodeOut_b_ready; // @[MixedNode.scala:542:17] wire _nodeOut_c_valid_T_4; // @[Arbiter.scala:96:24] assign auto_out_c_valid_0 = nodeOut_c_valid; // @[MixedNode.scala:542:17] wire [2:0] _nodeOut_c_bits_WIRE_opcode; // @[Mux.scala:30:73] assign auto_out_c_bits_opcode_0 = nodeOut_c_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] _nodeOut_c_bits_WIRE_param; // @[Mux.scala:30:73] assign auto_out_c_bits_param_0 = nodeOut_c_bits_param; // @[MixedNode.scala:542:17] wire [3:0] _nodeOut_c_bits_WIRE_size; // @[Mux.scala:30:73] assign auto_out_c_bits_size_0 = nodeOut_c_bits_size; // @[MixedNode.scala:542:17] wire [1:0] _nodeOut_c_bits_WIRE_source; // @[Mux.scala:30:73] assign auto_out_c_bits_source_0 = nodeOut_c_bits_source; // @[MixedNode.scala:542:17] wire [31:0] _nodeOut_c_bits_WIRE_address; // @[Mux.scala:30:73] assign auto_out_c_bits_address_0 = nodeOut_c_bits_address; // @[MixedNode.scala:542:17] wire [63:0] _nodeOut_c_bits_WIRE_data; // @[Mux.scala:30:73] assign auto_out_c_bits_data_0 = nodeOut_c_bits_data; // @[MixedNode.scala:542:17] assign auto_out_d_ready_0 = nodeOut_d_ready; // @[MixedNode.scala:542:17] assign auto_out_e_valid_0 = nodeOut_e_valid; // @[MixedNode.scala:542:17] assign auto_out_e_bits_sink_0 = nodeOut_e_bits_sink; // @[MixedNode.scala:542:17] wire _meta_0_io_write_valid_T = _meta_0_io_write_ready & _metaWriteArb_io_out_valid; // @[Decoupled.scala:51:35] wire _data_io_read_0_valid_T = _dataReadArb_io_out_bits_valid_0 & _dataReadArb_io_out_valid; // @[dcache.scala:463:27, :468:63] assign _io_lsu_req_ready_T = _metaReadArb_io_in_4_ready & _dataReadArb_io_in_2_ready; // @[dcache.scala:445:27, :463:27, :480:50] assign io_lsu_req_ready_0 = _io_lsu_req_ready_T; // @[dcache.scala:413:7, :480:50] wire [33:0] _metaReadArb_io_in_4_bits_req_0_idx_T = io_lsu_req_bits_0_bits_addr_0[39:6]; // @[dcache.scala:413:7, :485:77] wire [3:0] replay_req_0_uop_ctrl_br_type; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_ctrl_op1_sel; // @[dcache.scala:496:24] wire [2:0] replay_req_0_uop_ctrl_op2_sel; // @[dcache.scala:496:24] wire [2:0] replay_req_0_uop_ctrl_imm_sel; // @[dcache.scala:496:24] wire [4:0] replay_req_0_uop_ctrl_op_fcn; // @[dcache.scala:496:24] wire replay_req_0_uop_ctrl_fcn_dw; // 
@[dcache.scala:496:24] wire [2:0] replay_req_0_uop_ctrl_csr_cmd; // @[dcache.scala:496:24] wire replay_req_0_uop_ctrl_is_load; // @[dcache.scala:496:24] wire replay_req_0_uop_ctrl_is_sta; // @[dcache.scala:496:24] wire replay_req_0_uop_ctrl_is_std; // @[dcache.scala:496:24] wire [6:0] replay_req_0_uop_uopc; // @[dcache.scala:496:24] wire [31:0] replay_req_0_uop_inst; // @[dcache.scala:496:24] wire [31:0] replay_req_0_uop_debug_inst; // @[dcache.scala:496:24] wire replay_req_0_uop_is_rvc; // @[dcache.scala:496:24] wire [39:0] replay_req_0_uop_debug_pc; // @[dcache.scala:496:24] wire [2:0] replay_req_0_uop_iq_type; // @[dcache.scala:496:24] wire [9:0] replay_req_0_uop_fu_code; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_iw_state; // @[dcache.scala:496:24] wire replay_req_0_uop_iw_p1_poisoned; // @[dcache.scala:496:24] wire replay_req_0_uop_iw_p2_poisoned; // @[dcache.scala:496:24] wire replay_req_0_uop_is_br; // @[dcache.scala:496:24] wire replay_req_0_uop_is_jalr; // @[dcache.scala:496:24] wire replay_req_0_uop_is_jal; // @[dcache.scala:496:24] wire replay_req_0_uop_is_sfb; // @[dcache.scala:496:24] wire [7:0] replay_req_0_uop_br_mask; // @[dcache.scala:496:24] wire [2:0] replay_req_0_uop_br_tag; // @[dcache.scala:496:24] wire [3:0] replay_req_0_uop_ftq_idx; // @[dcache.scala:496:24] wire replay_req_0_uop_edge_inst; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_pc_lob; // @[dcache.scala:496:24] wire replay_req_0_uop_taken; // @[dcache.scala:496:24] wire [19:0] replay_req_0_uop_imm_packed; // @[dcache.scala:496:24] wire [11:0] replay_req_0_uop_csr_addr; // @[dcache.scala:496:24] wire [4:0] replay_req_0_uop_rob_idx; // @[dcache.scala:496:24] wire [2:0] replay_req_0_uop_ldq_idx; // @[dcache.scala:496:24] wire [2:0] replay_req_0_uop_stq_idx; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_rxq_idx; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_pdst; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_prs1; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_prs2; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_prs3; // @[dcache.scala:496:24] wire [3:0] replay_req_0_uop_ppred; // @[dcache.scala:496:24] wire replay_req_0_uop_prs1_busy; // @[dcache.scala:496:24] wire replay_req_0_uop_prs2_busy; // @[dcache.scala:496:24] wire replay_req_0_uop_prs3_busy; // @[dcache.scala:496:24] wire replay_req_0_uop_ppred_busy; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_stale_pdst; // @[dcache.scala:496:24] wire replay_req_0_uop_exception; // @[dcache.scala:496:24] wire [63:0] replay_req_0_uop_exc_cause; // @[dcache.scala:496:24] wire replay_req_0_uop_bypassable; // @[dcache.scala:496:24] wire [4:0] replay_req_0_uop_mem_cmd; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_mem_size; // @[dcache.scala:496:24] wire replay_req_0_uop_mem_signed; // @[dcache.scala:496:24] wire replay_req_0_uop_is_fence; // @[dcache.scala:496:24] wire replay_req_0_uop_is_fencei; // @[dcache.scala:496:24] wire replay_req_0_uop_is_amo; // @[dcache.scala:496:24] wire replay_req_0_uop_uses_ldq; // @[dcache.scala:496:24] wire replay_req_0_uop_uses_stq; // @[dcache.scala:496:24] wire replay_req_0_uop_is_sys_pc2epc; // @[dcache.scala:496:24] wire replay_req_0_uop_is_unique; // @[dcache.scala:496:24] wire replay_req_0_uop_flush_on_commit; // @[dcache.scala:496:24] wire replay_req_0_uop_ldst_is_rs1; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_ldst; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_lrs1; // @[dcache.scala:496:24] wire [5:0] replay_req_0_uop_lrs2; // 
@[dcache.scala:496:24] wire [5:0] replay_req_0_uop_lrs3; // @[dcache.scala:496:24] wire replay_req_0_uop_ldst_val; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_dst_rtype; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_lrs1_rtype; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_lrs2_rtype; // @[dcache.scala:496:24] wire replay_req_0_uop_frs3_en; // @[dcache.scala:496:24] wire replay_req_0_uop_fp_val; // @[dcache.scala:496:24] wire replay_req_0_uop_fp_single; // @[dcache.scala:496:24] wire replay_req_0_uop_xcpt_pf_if; // @[dcache.scala:496:24] wire replay_req_0_uop_xcpt_ae_if; // @[dcache.scala:496:24] wire replay_req_0_uop_xcpt_ma_if; // @[dcache.scala:496:24] wire replay_req_0_uop_bp_debug_if; // @[dcache.scala:496:24] wire replay_req_0_uop_bp_xcpt_if; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_debug_fsrc; // @[dcache.scala:496:24] wire [1:0] replay_req_0_uop_debug_tsrc; // @[dcache.scala:496:24] wire [39:0] replay_req_0_addr; // @[dcache.scala:496:24] wire [63:0] replay_req_0_data; // @[dcache.scala:496:24] wire replay_req_0_is_hella; // @[dcache.scala:496:24] wire [33:0] _metaReadArb_io_in_0_bits_req_0_idx_T = _mshrs_io_replay_bits_addr[39:6]; // @[dcache.scala:433:21, :506:72] wire [39:0] mshr_read_req_0_addr; // @[dcache.scala:517:27] wire [25:0] _mshr_read_req_0_addr_T = {_mshrs_io_meta_read_bits_tag, _mshrs_io_meta_read_bits_idx}; // @[dcache.scala:433:21, :520:35] wire [31:0] _mshr_read_req_0_addr_T_1 = {_mshr_read_req_0_addr_T, 6'h0}; // @[dcache.scala:520:{35,94}] assign mshr_read_req_0_addr = {8'h0, _mshr_read_req_0_addr_T_1}; // @[dcache.scala:517:27, :520:{29,94}] wire _wb_io_meta_read_ready_T; // @[dcache.scala:542:55] wire _wb_fire_T = _wb_io_meta_read_ready_T & _wb_io_meta_read_valid; // @[Decoupled.scala:51:35] wire _wb_io_data_req_ready_T; // @[dcache.scala:547:55] wire _wb_fire_T_1 = _wb_io_data_req_ready_T & _wb_io_data_req_valid; // @[Decoupled.scala:51:35] wire wb_fire = _wb_fire_T & _wb_fire_T_1; // @[Decoupled.scala:51:35] wire [39:0] wb_req_0_addr; // @[dcache.scala:532:20] wire [31:0] _wb_req_0_addr_T = {_wb_io_meta_read_bits_tag, _wb_io_data_req_bits_addr}; // @[dcache.scala:431:18, :535:28] assign wb_req_0_addr = {8'h0, _wb_req_0_addr_T}; // @[dcache.scala:532:20, :535:{22,28}] wire _GEN = _metaReadArb_io_in_2_ready & _dataReadArb_io_in_1_ready; // @[dcache.scala:445:27, :463:27, :542:55] assign _wb_io_meta_read_ready_T = _GEN; // @[dcache.scala:542:55] assign _wb_io_data_req_ready_T = _GEN; // @[dcache.scala:542:55, :547:55] wire prober_fire = _metaReadArb_io_in_1_ready & _prober_io_meta_read_valid; // @[Decoupled.scala:51:35] wire [39:0] prober_req_0_addr; // @[dcache.scala:553:26] wire [25:0] _prober_req_0_addr_T = {_prober_io_meta_read_bits_tag, _prober_io_meta_read_bits_idx}; // @[dcache.scala:432:22, :556:32] wire [31:0] _prober_req_0_addr_T_1 = {_prober_req_0_addr_T, 6'h0}; // @[dcache.scala:556:{32,93}] assign prober_req_0_addr = {8'h0, _prober_req_0_addr_T_1}; // @[dcache.scala:553:26, :556:{26,93}] wire _T_7 = io_lsu_req_ready_0 & io_lsu_req_valid_0; // @[Decoupled.scala:51:35] wire _s0_valid_T; // @[Decoupled.scala:51:35] assign _s0_valid_T = _T_7; // @[Decoupled.scala:51:35] wire _s0_req_T; // @[Decoupled.scala:51:35] assign _s0_req_T = _T_7; // @[Decoupled.scala:51:35] wire _s0_type_T; // @[Decoupled.scala:51:35] assign _s0_type_T = _T_7; // @[Decoupled.scala:51:35] wire _s0_send_resp_or_nack_T; // @[Decoupled.scala:51:35] assign _s0_send_resp_or_nack_T = _T_7; // @[Decoupled.scala:51:35] wire _s1_valid_T_7; // 
@[Decoupled.scala:51:35] assign _s1_valid_T_7 = _T_7; // @[Decoupled.scala:51:35] wire _mshrs_io_replay_ready_T; // @[dcache.scala:502:58] wire _GEN_0 = _mshrs_io_replay_ready_T & _mshrs_io_replay_valid; // @[Decoupled.scala:51:35] wire _s0_valid_T_1; // @[Decoupled.scala:51:35] assign _s0_valid_T_1 = _GEN_0; // @[Decoupled.scala:51:35] wire _s0_send_resp_or_nack_T_1; // @[Decoupled.scala:51:35] assign _s0_send_resp_or_nack_T_1 = _GEN_0; // @[Decoupled.scala:51:35] wire _s0_valid_T_2 = _s0_valid_T_1 | wb_fire; // @[Decoupled.scala:51:35] wire _s0_valid_T_3 = _s0_valid_T_2 | prober_fire; // @[Decoupled.scala:51:35] wire _s0_valid_T_4 = _s0_valid_T_3; // @[dcache.scala:580:{54,69}] wire _GEN_1 = _metaReadArb_io_in_3_ready & _mshrs_io_meta_read_valid; // @[Decoupled.scala:51:35] wire _s0_valid_T_5; // @[Decoupled.scala:51:35] assign _s0_valid_T_5 = _GEN_1; // @[Decoupled.scala:51:35] wire _s0_req_T_1; // @[Decoupled.scala:51:35] assign _s0_req_T_1 = _GEN_1; // @[Decoupled.scala:51:35] wire _s0_type_T_1; // @[Decoupled.scala:51:35] assign _s0_type_T_1 = _GEN_1; // @[Decoupled.scala:51:35] wire _s0_valid_T_6 = _s0_valid_T_4 | _s0_valid_T_5; // @[Decoupled.scala:51:35] wire _s0_valid_T_7_0 = _s0_valid_T_6; // @[dcache.scala:580:{21,86}] wire s0_valid_0 = _s0_valid_T ? _s0_valid_WIRE_0 : _s0_valid_T_7_0; // @[Decoupled.scala:51:35] wire [6:0] _s0_req_T_2_0_uop_uopc = _s0_req_T_1 ? 7'h0 : replay_req_0_uop_uopc; // @[Decoupled.scala:51:35] wire [31:0] _s0_req_T_2_0_uop_inst = _s0_req_T_1 ? 32'h0 : replay_req_0_uop_inst; // @[Decoupled.scala:51:35] wire [31:0] _s0_req_T_2_0_uop_debug_inst = _s0_req_T_1 ? 32'h0 : replay_req_0_uop_debug_inst; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_rvc = ~_s0_req_T_1 & replay_req_0_uop_is_rvc; // @[Decoupled.scala:51:35] wire [39:0] _s0_req_T_2_0_uop_debug_pc = _s0_req_T_1 ? 40'h0 : replay_req_0_uop_debug_pc; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_2_0_uop_iq_type = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_iq_type; // @[Decoupled.scala:51:35] wire [9:0] _s0_req_T_2_0_uop_fu_code = _s0_req_T_1 ? 10'h0 : replay_req_0_uop_fu_code; // @[Decoupled.scala:51:35] wire [3:0] _s0_req_T_2_0_uop_ctrl_br_type = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_ctrl_br_type; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_ctrl_op1_sel = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_ctrl_op1_sel; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_2_0_uop_ctrl_op2_sel = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_ctrl_op2_sel; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_2_0_uop_ctrl_imm_sel = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_ctrl_imm_sel; // @[Decoupled.scala:51:35] wire [4:0] _s0_req_T_2_0_uop_ctrl_op_fcn = _s0_req_T_1 ? 5'h0 : replay_req_0_uop_ctrl_op_fcn; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_ctrl_fcn_dw = ~_s0_req_T_1 & replay_req_0_uop_ctrl_fcn_dw; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_2_0_uop_ctrl_csr_cmd = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_ctrl_csr_cmd; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_ctrl_is_load = ~_s0_req_T_1 & replay_req_0_uop_ctrl_is_load; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_ctrl_is_sta = ~_s0_req_T_1 & replay_req_0_uop_ctrl_is_sta; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_ctrl_is_std = ~_s0_req_T_1 & replay_req_0_uop_ctrl_is_std; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_iw_state = _s0_req_T_1 ? 
2'h0 : replay_req_0_uop_iw_state; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_iw_p1_poisoned = ~_s0_req_T_1 & replay_req_0_uop_iw_p1_poisoned; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_iw_p2_poisoned = ~_s0_req_T_1 & replay_req_0_uop_iw_p2_poisoned; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_br = ~_s0_req_T_1 & replay_req_0_uop_is_br; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_jalr = ~_s0_req_T_1 & replay_req_0_uop_is_jalr; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_jal = ~_s0_req_T_1 & replay_req_0_uop_is_jal; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_sfb = ~_s0_req_T_1 & replay_req_0_uop_is_sfb; // @[Decoupled.scala:51:35] wire [7:0] _s0_req_T_2_0_uop_br_mask = _s0_req_T_1 ? 8'h0 : replay_req_0_uop_br_mask; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_2_0_uop_br_tag = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_br_tag; // @[Decoupled.scala:51:35] wire [3:0] _s0_req_T_2_0_uop_ftq_idx = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_ftq_idx; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_edge_inst = ~_s0_req_T_1 & replay_req_0_uop_edge_inst; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_pc_lob = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_pc_lob; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_taken = ~_s0_req_T_1 & replay_req_0_uop_taken; // @[Decoupled.scala:51:35] wire [19:0] _s0_req_T_2_0_uop_imm_packed = _s0_req_T_1 ? 20'h0 : replay_req_0_uop_imm_packed; // @[Decoupled.scala:51:35] wire [11:0] _s0_req_T_2_0_uop_csr_addr = _s0_req_T_1 ? 12'h0 : replay_req_0_uop_csr_addr; // @[Decoupled.scala:51:35] wire [4:0] _s0_req_T_2_0_uop_rob_idx = _s0_req_T_1 ? 5'h0 : replay_req_0_uop_rob_idx; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_2_0_uop_ldq_idx = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_ldq_idx; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_2_0_uop_stq_idx = _s0_req_T_1 ? 3'h0 : replay_req_0_uop_stq_idx; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_rxq_idx = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_rxq_idx; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_pdst = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_pdst; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_prs1 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_prs1; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_prs2 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_prs2; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_prs3 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_prs3; // @[Decoupled.scala:51:35] wire [3:0] _s0_req_T_2_0_uop_ppred = _s0_req_T_1 ? 4'h0 : replay_req_0_uop_ppred; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_prs1_busy = ~_s0_req_T_1 & replay_req_0_uop_prs1_busy; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_prs2_busy = ~_s0_req_T_1 & replay_req_0_uop_prs2_busy; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_prs3_busy = ~_s0_req_T_1 & replay_req_0_uop_prs3_busy; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_ppred_busy = ~_s0_req_T_1 & replay_req_0_uop_ppred_busy; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_stale_pdst = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_stale_pdst; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_exception = ~_s0_req_T_1 & replay_req_0_uop_exception; // @[Decoupled.scala:51:35] wire [63:0] _s0_req_T_2_0_uop_exc_cause = _s0_req_T_1 ? 64'h0 : replay_req_0_uop_exc_cause; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_bypassable = ~_s0_req_T_1 & replay_req_0_uop_bypassable; // @[Decoupled.scala:51:35] wire [4:0] _s0_req_T_2_0_uop_mem_cmd = _s0_req_T_1 ? 
5'h0 : replay_req_0_uop_mem_cmd; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_mem_size = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_mem_size; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_mem_signed = ~_s0_req_T_1 & replay_req_0_uop_mem_signed; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_fence = ~_s0_req_T_1 & replay_req_0_uop_is_fence; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_fencei = ~_s0_req_T_1 & replay_req_0_uop_is_fencei; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_amo = ~_s0_req_T_1 & replay_req_0_uop_is_amo; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_uses_ldq = ~_s0_req_T_1 & replay_req_0_uop_uses_ldq; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_uses_stq = ~_s0_req_T_1 & replay_req_0_uop_uses_stq; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_sys_pc2epc = ~_s0_req_T_1 & replay_req_0_uop_is_sys_pc2epc; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_is_unique = ~_s0_req_T_1 & replay_req_0_uop_is_unique; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_flush_on_commit = ~_s0_req_T_1 & replay_req_0_uop_flush_on_commit; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_ldst_is_rs1 = ~_s0_req_T_1 & replay_req_0_uop_ldst_is_rs1; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_ldst = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_ldst; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_lrs1 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_lrs1; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_lrs2 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_lrs2; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_2_0_uop_lrs3 = _s0_req_T_1 ? 6'h0 : replay_req_0_uop_lrs3; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_ldst_val = ~_s0_req_T_1 & replay_req_0_uop_ldst_val; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_dst_rtype = _s0_req_T_1 ? 2'h2 : replay_req_0_uop_dst_rtype; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_lrs1_rtype = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_lrs1_rtype; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_lrs2_rtype = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_lrs2_rtype; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_frs3_en = ~_s0_req_T_1 & replay_req_0_uop_frs3_en; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_fp_val = ~_s0_req_T_1 & replay_req_0_uop_fp_val; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_fp_single = ~_s0_req_T_1 & replay_req_0_uop_fp_single; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_xcpt_pf_if = ~_s0_req_T_1 & replay_req_0_uop_xcpt_pf_if; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_xcpt_ae_if = ~_s0_req_T_1 & replay_req_0_uop_xcpt_ae_if; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_xcpt_ma_if = ~_s0_req_T_1 & replay_req_0_uop_xcpt_ma_if; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_bp_debug_if = ~_s0_req_T_1 & replay_req_0_uop_bp_debug_if; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_uop_bp_xcpt_if = ~_s0_req_T_1 & replay_req_0_uop_bp_xcpt_if; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_debug_fsrc = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_debug_fsrc; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_2_0_uop_debug_tsrc = _s0_req_T_1 ? 2'h0 : replay_req_0_uop_debug_tsrc; // @[Decoupled.scala:51:35] wire [39:0] _s0_req_T_2_0_addr = _s0_req_T_1 ? mshr_read_req_0_addr : replay_req_0_addr; // @[Decoupled.scala:51:35] wire [63:0] _s0_req_T_2_0_data = _s0_req_T_1 ? 
64'h0 : replay_req_0_data; // @[Decoupled.scala:51:35] wire _s0_req_T_2_0_is_hella = ~_s0_req_T_1 & replay_req_0_is_hella; // @[Decoupled.scala:51:35] wire [6:0] _s0_req_T_3_0_uop_uopc = _s0_req_T_2_0_uop_uopc; // @[dcache.scala:585:21, :586:21] wire [31:0] _s0_req_T_3_0_uop_inst = _s0_req_T_2_0_uop_inst; // @[dcache.scala:585:21, :586:21] wire [31:0] _s0_req_T_3_0_uop_debug_inst = _s0_req_T_2_0_uop_debug_inst; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_rvc = _s0_req_T_2_0_uop_is_rvc; // @[dcache.scala:585:21, :586:21] wire [39:0] _s0_req_T_3_0_uop_debug_pc = _s0_req_T_2_0_uop_debug_pc; // @[dcache.scala:585:21, :586:21] wire [2:0] _s0_req_T_3_0_uop_iq_type = _s0_req_T_2_0_uop_iq_type; // @[dcache.scala:585:21, :586:21] wire [9:0] _s0_req_T_3_0_uop_fu_code = _s0_req_T_2_0_uop_fu_code; // @[dcache.scala:585:21, :586:21] wire [3:0] _s0_req_T_3_0_uop_ctrl_br_type = _s0_req_T_2_0_uop_ctrl_br_type; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_ctrl_op1_sel = _s0_req_T_2_0_uop_ctrl_op1_sel; // @[dcache.scala:585:21, :586:21] wire [2:0] _s0_req_T_3_0_uop_ctrl_op2_sel = _s0_req_T_2_0_uop_ctrl_op2_sel; // @[dcache.scala:585:21, :586:21] wire [2:0] _s0_req_T_3_0_uop_ctrl_imm_sel = _s0_req_T_2_0_uop_ctrl_imm_sel; // @[dcache.scala:585:21, :586:21] wire [4:0] _s0_req_T_3_0_uop_ctrl_op_fcn = _s0_req_T_2_0_uop_ctrl_op_fcn; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_ctrl_fcn_dw = _s0_req_T_2_0_uop_ctrl_fcn_dw; // @[dcache.scala:585:21, :586:21] wire [2:0] _s0_req_T_3_0_uop_ctrl_csr_cmd = _s0_req_T_2_0_uop_ctrl_csr_cmd; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_ctrl_is_load = _s0_req_T_2_0_uop_ctrl_is_load; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_ctrl_is_sta = _s0_req_T_2_0_uop_ctrl_is_sta; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_ctrl_is_std = _s0_req_T_2_0_uop_ctrl_is_std; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_iw_state = _s0_req_T_2_0_uop_iw_state; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_iw_p1_poisoned = _s0_req_T_2_0_uop_iw_p1_poisoned; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_iw_p2_poisoned = _s0_req_T_2_0_uop_iw_p2_poisoned; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_br = _s0_req_T_2_0_uop_is_br; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_jalr = _s0_req_T_2_0_uop_is_jalr; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_jal = _s0_req_T_2_0_uop_is_jal; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_sfb = _s0_req_T_2_0_uop_is_sfb; // @[dcache.scala:585:21, :586:21] wire [7:0] _s0_req_T_3_0_uop_br_mask = _s0_req_T_2_0_uop_br_mask; // @[dcache.scala:585:21, :586:21] wire [2:0] _s0_req_T_3_0_uop_br_tag = _s0_req_T_2_0_uop_br_tag; // @[dcache.scala:585:21, :586:21] wire [3:0] _s0_req_T_3_0_uop_ftq_idx = _s0_req_T_2_0_uop_ftq_idx; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_edge_inst = _s0_req_T_2_0_uop_edge_inst; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_pc_lob = _s0_req_T_2_0_uop_pc_lob; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_taken = _s0_req_T_2_0_uop_taken; // @[dcache.scala:585:21, :586:21] wire [19:0] _s0_req_T_3_0_uop_imm_packed = _s0_req_T_2_0_uop_imm_packed; // @[dcache.scala:585:21, :586:21] wire [11:0] _s0_req_T_3_0_uop_csr_addr = _s0_req_T_2_0_uop_csr_addr; // @[dcache.scala:585:21, :586:21] wire [4:0] _s0_req_T_3_0_uop_rob_idx = _s0_req_T_2_0_uop_rob_idx; // @[dcache.scala:585:21, :586:21] wire [2:0] 
_s0_req_T_3_0_uop_ldq_idx = _s0_req_T_2_0_uop_ldq_idx; // @[dcache.scala:585:21, :586:21] wire [2:0] _s0_req_T_3_0_uop_stq_idx = _s0_req_T_2_0_uop_stq_idx; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_rxq_idx = _s0_req_T_2_0_uop_rxq_idx; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_pdst = _s0_req_T_2_0_uop_pdst; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_prs1 = _s0_req_T_2_0_uop_prs1; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_prs2 = _s0_req_T_2_0_uop_prs2; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_prs3 = _s0_req_T_2_0_uop_prs3; // @[dcache.scala:585:21, :586:21] wire [3:0] _s0_req_T_3_0_uop_ppred = _s0_req_T_2_0_uop_ppred; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_prs1_busy = _s0_req_T_2_0_uop_prs1_busy; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_prs2_busy = _s0_req_T_2_0_uop_prs2_busy; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_prs3_busy = _s0_req_T_2_0_uop_prs3_busy; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_ppred_busy = _s0_req_T_2_0_uop_ppred_busy; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_stale_pdst = _s0_req_T_2_0_uop_stale_pdst; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_exception = _s0_req_T_2_0_uop_exception; // @[dcache.scala:585:21, :586:21] wire [63:0] _s0_req_T_3_0_uop_exc_cause = _s0_req_T_2_0_uop_exc_cause; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_bypassable = _s0_req_T_2_0_uop_bypassable; // @[dcache.scala:585:21, :586:21] wire [4:0] _s0_req_T_3_0_uop_mem_cmd = _s0_req_T_2_0_uop_mem_cmd; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_mem_size = _s0_req_T_2_0_uop_mem_size; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_mem_signed = _s0_req_T_2_0_uop_mem_signed; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_fence = _s0_req_T_2_0_uop_is_fence; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_fencei = _s0_req_T_2_0_uop_is_fencei; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_amo = _s0_req_T_2_0_uop_is_amo; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_uses_ldq = _s0_req_T_2_0_uop_uses_ldq; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_uses_stq = _s0_req_T_2_0_uop_uses_stq; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_sys_pc2epc = _s0_req_T_2_0_uop_is_sys_pc2epc; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_is_unique = _s0_req_T_2_0_uop_is_unique; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_flush_on_commit = _s0_req_T_2_0_uop_flush_on_commit; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_ldst_is_rs1 = _s0_req_T_2_0_uop_ldst_is_rs1; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_ldst = _s0_req_T_2_0_uop_ldst; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_lrs1 = _s0_req_T_2_0_uop_lrs1; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_lrs2 = _s0_req_T_2_0_uop_lrs2; // @[dcache.scala:585:21, :586:21] wire [5:0] _s0_req_T_3_0_uop_lrs3 = _s0_req_T_2_0_uop_lrs3; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_ldst_val = _s0_req_T_2_0_uop_ldst_val; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_dst_rtype = _s0_req_T_2_0_uop_dst_rtype; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_lrs1_rtype = _s0_req_T_2_0_uop_lrs1_rtype; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_lrs2_rtype = _s0_req_T_2_0_uop_lrs2_rtype; // 
@[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_frs3_en = _s0_req_T_2_0_uop_frs3_en; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_fp_val = _s0_req_T_2_0_uop_fp_val; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_fp_single = _s0_req_T_2_0_uop_fp_single; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_xcpt_pf_if = _s0_req_T_2_0_uop_xcpt_pf_if; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_xcpt_ae_if = _s0_req_T_2_0_uop_xcpt_ae_if; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_xcpt_ma_if = _s0_req_T_2_0_uop_xcpt_ma_if; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_bp_debug_if = _s0_req_T_2_0_uop_bp_debug_if; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_uop_bp_xcpt_if = _s0_req_T_2_0_uop_bp_xcpt_if; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_debug_fsrc = _s0_req_T_2_0_uop_debug_fsrc; // @[dcache.scala:585:21, :586:21] wire [1:0] _s0_req_T_3_0_uop_debug_tsrc = _s0_req_T_2_0_uop_debug_tsrc; // @[dcache.scala:585:21, :586:21] wire [39:0] _s0_req_T_3_0_addr = _s0_req_T_2_0_addr; // @[dcache.scala:585:21, :586:21] wire [63:0] _s0_req_T_3_0_data = _s0_req_T_2_0_data; // @[dcache.scala:585:21, :586:21] wire _s0_req_T_3_0_is_hella = _s0_req_T_2_0_is_hella; // @[dcache.scala:585:21, :586:21] wire [6:0] _s0_req_T_4_0_uop_uopc = prober_fire ? 7'h0 : _s0_req_T_3_0_uop_uopc; // @[Decoupled.scala:51:35] wire [31:0] _s0_req_T_4_0_uop_inst = prober_fire ? 32'h0 : _s0_req_T_3_0_uop_inst; // @[Decoupled.scala:51:35] wire [31:0] _s0_req_T_4_0_uop_debug_inst = prober_fire ? 32'h0 : _s0_req_T_3_0_uop_debug_inst; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_rvc = ~prober_fire & _s0_req_T_3_0_uop_is_rvc; // @[Decoupled.scala:51:35] wire [39:0] _s0_req_T_4_0_uop_debug_pc = prober_fire ? 40'h0 : _s0_req_T_3_0_uop_debug_pc; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_4_0_uop_iq_type = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_iq_type; // @[Decoupled.scala:51:35] wire [9:0] _s0_req_T_4_0_uop_fu_code = prober_fire ? 10'h0 : _s0_req_T_3_0_uop_fu_code; // @[Decoupled.scala:51:35] wire [3:0] _s0_req_T_4_0_uop_ctrl_br_type = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_ctrl_br_type; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_ctrl_op1_sel = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_ctrl_op1_sel; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_4_0_uop_ctrl_op2_sel = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_ctrl_op2_sel; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_4_0_uop_ctrl_imm_sel = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_ctrl_imm_sel; // @[Decoupled.scala:51:35] wire [4:0] _s0_req_T_4_0_uop_ctrl_op_fcn = prober_fire ? 5'h0 : _s0_req_T_3_0_uop_ctrl_op_fcn; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_ctrl_fcn_dw = ~prober_fire & _s0_req_T_3_0_uop_ctrl_fcn_dw; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_4_0_uop_ctrl_csr_cmd = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_ctrl_csr_cmd; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_ctrl_is_load = ~prober_fire & _s0_req_T_3_0_uop_ctrl_is_load; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_ctrl_is_sta = ~prober_fire & _s0_req_T_3_0_uop_ctrl_is_sta; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_ctrl_is_std = ~prober_fire & _s0_req_T_3_0_uop_ctrl_is_std; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_iw_state = prober_fire ? 
2'h0 : _s0_req_T_3_0_uop_iw_state; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_iw_p1_poisoned = ~prober_fire & _s0_req_T_3_0_uop_iw_p1_poisoned; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_iw_p2_poisoned = ~prober_fire & _s0_req_T_3_0_uop_iw_p2_poisoned; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_br = ~prober_fire & _s0_req_T_3_0_uop_is_br; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_jalr = ~prober_fire & _s0_req_T_3_0_uop_is_jalr; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_jal = ~prober_fire & _s0_req_T_3_0_uop_is_jal; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_sfb = ~prober_fire & _s0_req_T_3_0_uop_is_sfb; // @[Decoupled.scala:51:35] wire [7:0] _s0_req_T_4_0_uop_br_mask = prober_fire ? 8'h0 : _s0_req_T_3_0_uop_br_mask; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_4_0_uop_br_tag = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_br_tag; // @[Decoupled.scala:51:35] wire [3:0] _s0_req_T_4_0_uop_ftq_idx = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_ftq_idx; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_edge_inst = ~prober_fire & _s0_req_T_3_0_uop_edge_inst; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_pc_lob = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_pc_lob; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_taken = ~prober_fire & _s0_req_T_3_0_uop_taken; // @[Decoupled.scala:51:35] wire [19:0] _s0_req_T_4_0_uop_imm_packed = prober_fire ? 20'h0 : _s0_req_T_3_0_uop_imm_packed; // @[Decoupled.scala:51:35] wire [11:0] _s0_req_T_4_0_uop_csr_addr = prober_fire ? 12'h0 : _s0_req_T_3_0_uop_csr_addr; // @[Decoupled.scala:51:35] wire [4:0] _s0_req_T_4_0_uop_rob_idx = prober_fire ? 5'h0 : _s0_req_T_3_0_uop_rob_idx; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_4_0_uop_ldq_idx = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_ldq_idx; // @[Decoupled.scala:51:35] wire [2:0] _s0_req_T_4_0_uop_stq_idx = prober_fire ? 3'h0 : _s0_req_T_3_0_uop_stq_idx; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_rxq_idx = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_rxq_idx; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_pdst = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_pdst; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_prs1 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_prs1; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_prs2 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_prs2; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_prs3 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_prs3; // @[Decoupled.scala:51:35] wire [3:0] _s0_req_T_4_0_uop_ppred = prober_fire ? 4'h0 : _s0_req_T_3_0_uop_ppred; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_prs1_busy = ~prober_fire & _s0_req_T_3_0_uop_prs1_busy; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_prs2_busy = ~prober_fire & _s0_req_T_3_0_uop_prs2_busy; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_prs3_busy = ~prober_fire & _s0_req_T_3_0_uop_prs3_busy; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_ppred_busy = ~prober_fire & _s0_req_T_3_0_uop_ppred_busy; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_stale_pdst = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_stale_pdst; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_exception = ~prober_fire & _s0_req_T_3_0_uop_exception; // @[Decoupled.scala:51:35] wire [63:0] _s0_req_T_4_0_uop_exc_cause = prober_fire ? 64'h0 : _s0_req_T_3_0_uop_exc_cause; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_bypassable = ~prober_fire & _s0_req_T_3_0_uop_bypassable; // @[Decoupled.scala:51:35] wire [4:0] _s0_req_T_4_0_uop_mem_cmd = prober_fire ? 
5'h0 : _s0_req_T_3_0_uop_mem_cmd; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_mem_size = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_mem_size; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_mem_signed = ~prober_fire & _s0_req_T_3_0_uop_mem_signed; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_fence = ~prober_fire & _s0_req_T_3_0_uop_is_fence; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_fencei = ~prober_fire & _s0_req_T_3_0_uop_is_fencei; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_amo = ~prober_fire & _s0_req_T_3_0_uop_is_amo; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_uses_ldq = ~prober_fire & _s0_req_T_3_0_uop_uses_ldq; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_uses_stq = ~prober_fire & _s0_req_T_3_0_uop_uses_stq; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_sys_pc2epc = ~prober_fire & _s0_req_T_3_0_uop_is_sys_pc2epc; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_is_unique = ~prober_fire & _s0_req_T_3_0_uop_is_unique; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_flush_on_commit = ~prober_fire & _s0_req_T_3_0_uop_flush_on_commit; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_ldst_is_rs1 = ~prober_fire & _s0_req_T_3_0_uop_ldst_is_rs1; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_ldst = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_ldst; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_lrs1 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_lrs1; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_lrs2 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_lrs2; // @[Decoupled.scala:51:35] wire [5:0] _s0_req_T_4_0_uop_lrs3 = prober_fire ? 6'h0 : _s0_req_T_3_0_uop_lrs3; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_ldst_val = ~prober_fire & _s0_req_T_3_0_uop_ldst_val; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_dst_rtype = prober_fire ? 2'h2 : _s0_req_T_3_0_uop_dst_rtype; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_lrs1_rtype = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_lrs1_rtype; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_lrs2_rtype = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_lrs2_rtype; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_frs3_en = ~prober_fire & _s0_req_T_3_0_uop_frs3_en; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_fp_val = ~prober_fire & _s0_req_T_3_0_uop_fp_val; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_fp_single = ~prober_fire & _s0_req_T_3_0_uop_fp_single; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_xcpt_pf_if = ~prober_fire & _s0_req_T_3_0_uop_xcpt_pf_if; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_xcpt_ae_if = ~prober_fire & _s0_req_T_3_0_uop_xcpt_ae_if; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_xcpt_ma_if = ~prober_fire & _s0_req_T_3_0_uop_xcpt_ma_if; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_bp_debug_if = ~prober_fire & _s0_req_T_3_0_uop_bp_debug_if; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_uop_bp_xcpt_if = ~prober_fire & _s0_req_T_3_0_uop_bp_xcpt_if; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_debug_fsrc = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_debug_fsrc; // @[Decoupled.scala:51:35] wire [1:0] _s0_req_T_4_0_uop_debug_tsrc = prober_fire ? 2'h0 : _s0_req_T_3_0_uop_debug_tsrc; // @[Decoupled.scala:51:35] wire [39:0] _s0_req_T_4_0_addr = prober_fire ? prober_req_0_addr : _s0_req_T_3_0_addr; // @[Decoupled.scala:51:35] wire [63:0] _s0_req_T_4_0_data = prober_fire ? 
64'h0 : _s0_req_T_3_0_data; // @[Decoupled.scala:51:35] wire _s0_req_T_4_0_is_hella = ~prober_fire & _s0_req_T_3_0_is_hella; // @[Decoupled.scala:51:35] wire [6:0] _s0_req_T_5_0_uop_uopc = wb_fire ? 7'h0 : _s0_req_T_4_0_uop_uopc; // @[dcache.scala:531:38, :583:21, :584:21] wire [31:0] _s0_req_T_5_0_uop_inst = wb_fire ? 32'h0 : _s0_req_T_4_0_uop_inst; // @[dcache.scala:531:38, :583:21, :584:21] wire [31:0] _s0_req_T_5_0_uop_debug_inst = wb_fire ? 32'h0 : _s0_req_T_4_0_uop_debug_inst; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_rvc = ~wb_fire & _s0_req_T_4_0_uop_is_rvc; // @[dcache.scala:531:38, :583:21, :584:21] wire [39:0] _s0_req_T_5_0_uop_debug_pc = wb_fire ? 40'h0 : _s0_req_T_4_0_uop_debug_pc; // @[dcache.scala:531:38, :583:21, :584:21] wire [2:0] _s0_req_T_5_0_uop_iq_type = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_iq_type; // @[dcache.scala:531:38, :583:21, :584:21] wire [9:0] _s0_req_T_5_0_uop_fu_code = wb_fire ? 10'h0 : _s0_req_T_4_0_uop_fu_code; // @[dcache.scala:531:38, :583:21, :584:21] wire [3:0] _s0_req_T_5_0_uop_ctrl_br_type = wb_fire ? 4'h0 : _s0_req_T_4_0_uop_ctrl_br_type; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_ctrl_op1_sel = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_ctrl_op1_sel; // @[dcache.scala:531:38, :583:21, :584:21] wire [2:0] _s0_req_T_5_0_uop_ctrl_op2_sel = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_ctrl_op2_sel; // @[dcache.scala:531:38, :583:21, :584:21] wire [2:0] _s0_req_T_5_0_uop_ctrl_imm_sel = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_ctrl_imm_sel; // @[dcache.scala:531:38, :583:21, :584:21] wire [4:0] _s0_req_T_5_0_uop_ctrl_op_fcn = wb_fire ? 5'h0 : _s0_req_T_4_0_uop_ctrl_op_fcn; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_ctrl_fcn_dw = ~wb_fire & _s0_req_T_4_0_uop_ctrl_fcn_dw; // @[dcache.scala:531:38, :583:21, :584:21] wire [2:0] _s0_req_T_5_0_uop_ctrl_csr_cmd = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_ctrl_csr_cmd; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_ctrl_is_load = ~wb_fire & _s0_req_T_4_0_uop_ctrl_is_load; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_ctrl_is_sta = ~wb_fire & _s0_req_T_4_0_uop_ctrl_is_sta; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_ctrl_is_std = ~wb_fire & _s0_req_T_4_0_uop_ctrl_is_std; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_iw_state = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_iw_state; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_iw_p1_poisoned = ~wb_fire & _s0_req_T_4_0_uop_iw_p1_poisoned; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_iw_p2_poisoned = ~wb_fire & _s0_req_T_4_0_uop_iw_p2_poisoned; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_br = ~wb_fire & _s0_req_T_4_0_uop_is_br; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_jalr = ~wb_fire & _s0_req_T_4_0_uop_is_jalr; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_jal = ~wb_fire & _s0_req_T_4_0_uop_is_jal; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_sfb = ~wb_fire & _s0_req_T_4_0_uop_is_sfb; // @[dcache.scala:531:38, :583:21, :584:21] wire [7:0] _s0_req_T_5_0_uop_br_mask = wb_fire ? 8'h0 : _s0_req_T_4_0_uop_br_mask; // @[dcache.scala:531:38, :583:21, :584:21] wire [2:0] _s0_req_T_5_0_uop_br_tag = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_br_tag; // @[dcache.scala:531:38, :583:21, :584:21] wire [3:0] _s0_req_T_5_0_uop_ftq_idx = wb_fire ? 
4'h0 : _s0_req_T_4_0_uop_ftq_idx; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_edge_inst = ~wb_fire & _s0_req_T_4_0_uop_edge_inst; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_pc_lob = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_pc_lob; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_taken = ~wb_fire & _s0_req_T_4_0_uop_taken; // @[dcache.scala:531:38, :583:21, :584:21] wire [19:0] _s0_req_T_5_0_uop_imm_packed = wb_fire ? 20'h0 : _s0_req_T_4_0_uop_imm_packed; // @[dcache.scala:531:38, :583:21, :584:21] wire [11:0] _s0_req_T_5_0_uop_csr_addr = wb_fire ? 12'h0 : _s0_req_T_4_0_uop_csr_addr; // @[dcache.scala:531:38, :583:21, :584:21] wire [4:0] _s0_req_T_5_0_uop_rob_idx = wb_fire ? 5'h0 : _s0_req_T_4_0_uop_rob_idx; // @[dcache.scala:531:38, :583:21, :584:21] wire [2:0] _s0_req_T_5_0_uop_ldq_idx = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_ldq_idx; // @[dcache.scala:531:38, :583:21, :584:21] wire [2:0] _s0_req_T_5_0_uop_stq_idx = wb_fire ? 3'h0 : _s0_req_T_4_0_uop_stq_idx; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_rxq_idx = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_rxq_idx; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_pdst = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_pdst; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_prs1 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_prs1; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_prs2 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_prs2; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_prs3 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_prs3; // @[dcache.scala:531:38, :583:21, :584:21] wire [3:0] _s0_req_T_5_0_uop_ppred = wb_fire ? 4'h0 : _s0_req_T_4_0_uop_ppred; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_prs1_busy = ~wb_fire & _s0_req_T_4_0_uop_prs1_busy; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_prs2_busy = ~wb_fire & _s0_req_T_4_0_uop_prs2_busy; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_prs3_busy = ~wb_fire & _s0_req_T_4_0_uop_prs3_busy; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_ppred_busy = ~wb_fire & _s0_req_T_4_0_uop_ppred_busy; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_stale_pdst = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_stale_pdst; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_exception = ~wb_fire & _s0_req_T_4_0_uop_exception; // @[dcache.scala:531:38, :583:21, :584:21] wire [63:0] _s0_req_T_5_0_uop_exc_cause = wb_fire ? 64'h0 : _s0_req_T_4_0_uop_exc_cause; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_bypassable = ~wb_fire & _s0_req_T_4_0_uop_bypassable; // @[dcache.scala:531:38, :583:21, :584:21] wire [4:0] _s0_req_T_5_0_uop_mem_cmd = wb_fire ? 5'h0 : _s0_req_T_4_0_uop_mem_cmd; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_mem_size = wb_fire ? 
2'h0 : _s0_req_T_4_0_uop_mem_size; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_mem_signed = ~wb_fire & _s0_req_T_4_0_uop_mem_signed; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_fence = ~wb_fire & _s0_req_T_4_0_uop_is_fence; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_fencei = ~wb_fire & _s0_req_T_4_0_uop_is_fencei; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_amo = ~wb_fire & _s0_req_T_4_0_uop_is_amo; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_uses_ldq = ~wb_fire & _s0_req_T_4_0_uop_uses_ldq; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_uses_stq = ~wb_fire & _s0_req_T_4_0_uop_uses_stq; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_sys_pc2epc = ~wb_fire & _s0_req_T_4_0_uop_is_sys_pc2epc; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_is_unique = ~wb_fire & _s0_req_T_4_0_uop_is_unique; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_flush_on_commit = ~wb_fire & _s0_req_T_4_0_uop_flush_on_commit; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_ldst_is_rs1 = ~wb_fire & _s0_req_T_4_0_uop_ldst_is_rs1; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_ldst = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_ldst; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_lrs1 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_lrs1; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_lrs2 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_lrs2; // @[dcache.scala:531:38, :583:21, :584:21] wire [5:0] _s0_req_T_5_0_uop_lrs3 = wb_fire ? 6'h0 : _s0_req_T_4_0_uop_lrs3; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_ldst_val = ~wb_fire & _s0_req_T_4_0_uop_ldst_val; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_dst_rtype = wb_fire ? 2'h2 : _s0_req_T_4_0_uop_dst_rtype; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_lrs1_rtype = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_lrs1_rtype; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_lrs2_rtype = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_lrs2_rtype; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_frs3_en = ~wb_fire & _s0_req_T_4_0_uop_frs3_en; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_fp_val = ~wb_fire & _s0_req_T_4_0_uop_fp_val; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_fp_single = ~wb_fire & _s0_req_T_4_0_uop_fp_single; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_xcpt_pf_if = ~wb_fire & _s0_req_T_4_0_uop_xcpt_pf_if; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_xcpt_ae_if = ~wb_fire & _s0_req_T_4_0_uop_xcpt_ae_if; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_xcpt_ma_if = ~wb_fire & _s0_req_T_4_0_uop_xcpt_ma_if; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_bp_debug_if = ~wb_fire & _s0_req_T_4_0_uop_bp_debug_if; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_uop_bp_xcpt_if = ~wb_fire & _s0_req_T_4_0_uop_bp_xcpt_if; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_debug_fsrc = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_debug_fsrc; // @[dcache.scala:531:38, :583:21, :584:21] wire [1:0] _s0_req_T_5_0_uop_debug_tsrc = wb_fire ? 2'h0 : _s0_req_T_4_0_uop_debug_tsrc; // @[dcache.scala:531:38, :583:21, :584:21] wire [39:0] _s0_req_T_5_0_addr = wb_fire ? 
wb_req_0_addr : _s0_req_T_4_0_addr; // @[dcache.scala:531:38, :532:20, :583:21, :584:21] wire [63:0] _s0_req_T_5_0_data = wb_fire ? 64'h0 : _s0_req_T_4_0_data; // @[dcache.scala:531:38, :583:21, :584:21] wire _s0_req_T_5_0_is_hella = ~wb_fire & _s0_req_T_4_0_is_hella; // @[dcache.scala:531:38, :583:21, :584:21] wire [6:0] s0_req_0_uop_uopc = _s0_req_T ? _s0_req_WIRE_0_uop_uopc : _s0_req_T_5_0_uop_uopc; // @[Decoupled.scala:51:35] wire [31:0] s0_req_0_uop_inst = _s0_req_T ? _s0_req_WIRE_0_uop_inst : _s0_req_T_5_0_uop_inst; // @[Decoupled.scala:51:35] wire [31:0] s0_req_0_uop_debug_inst = _s0_req_T ? _s0_req_WIRE_0_uop_debug_inst : _s0_req_T_5_0_uop_debug_inst; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_rvc = _s0_req_T ? _s0_req_WIRE_0_uop_is_rvc : _s0_req_T_5_0_uop_is_rvc; // @[Decoupled.scala:51:35] wire [39:0] s0_req_0_uop_debug_pc = _s0_req_T ? _s0_req_WIRE_0_uop_debug_pc : _s0_req_T_5_0_uop_debug_pc; // @[Decoupled.scala:51:35] wire [2:0] s0_req_0_uop_iq_type = _s0_req_T ? _s0_req_WIRE_0_uop_iq_type : _s0_req_T_5_0_uop_iq_type; // @[Decoupled.scala:51:35] wire [9:0] s0_req_0_uop_fu_code = _s0_req_T ? _s0_req_WIRE_0_uop_fu_code : _s0_req_T_5_0_uop_fu_code; // @[Decoupled.scala:51:35] wire [3:0] s0_req_0_uop_ctrl_br_type = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_br_type : _s0_req_T_5_0_uop_ctrl_br_type; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_ctrl_op1_sel = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_op1_sel : _s0_req_T_5_0_uop_ctrl_op1_sel; // @[Decoupled.scala:51:35] wire [2:0] s0_req_0_uop_ctrl_op2_sel = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_op2_sel : _s0_req_T_5_0_uop_ctrl_op2_sel; // @[Decoupled.scala:51:35] wire [2:0] s0_req_0_uop_ctrl_imm_sel = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_imm_sel : _s0_req_T_5_0_uop_ctrl_imm_sel; // @[Decoupled.scala:51:35] wire [4:0] s0_req_0_uop_ctrl_op_fcn = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_op_fcn : _s0_req_T_5_0_uop_ctrl_op_fcn; // @[Decoupled.scala:51:35] wire s0_req_0_uop_ctrl_fcn_dw = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_fcn_dw : _s0_req_T_5_0_uop_ctrl_fcn_dw; // @[Decoupled.scala:51:35] wire [2:0] s0_req_0_uop_ctrl_csr_cmd = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_csr_cmd : _s0_req_T_5_0_uop_ctrl_csr_cmd; // @[Decoupled.scala:51:35] wire s0_req_0_uop_ctrl_is_load = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_is_load : _s0_req_T_5_0_uop_ctrl_is_load; // @[Decoupled.scala:51:35] wire s0_req_0_uop_ctrl_is_sta = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_is_sta : _s0_req_T_5_0_uop_ctrl_is_sta; // @[Decoupled.scala:51:35] wire s0_req_0_uop_ctrl_is_std = _s0_req_T ? _s0_req_WIRE_0_uop_ctrl_is_std : _s0_req_T_5_0_uop_ctrl_is_std; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_iw_state = _s0_req_T ? _s0_req_WIRE_0_uop_iw_state : _s0_req_T_5_0_uop_iw_state; // @[Decoupled.scala:51:35] wire s0_req_0_uop_iw_p1_poisoned = _s0_req_T ? _s0_req_WIRE_0_uop_iw_p1_poisoned : _s0_req_T_5_0_uop_iw_p1_poisoned; // @[Decoupled.scala:51:35] wire s0_req_0_uop_iw_p2_poisoned = _s0_req_T ? _s0_req_WIRE_0_uop_iw_p2_poisoned : _s0_req_T_5_0_uop_iw_p2_poisoned; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_br = _s0_req_T ? _s0_req_WIRE_0_uop_is_br : _s0_req_T_5_0_uop_is_br; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_jalr = _s0_req_T ? _s0_req_WIRE_0_uop_is_jalr : _s0_req_T_5_0_uop_is_jalr; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_jal = _s0_req_T ? _s0_req_WIRE_0_uop_is_jal : _s0_req_T_5_0_uop_is_jal; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_sfb = _s0_req_T ? 
_s0_req_WIRE_0_uop_is_sfb : _s0_req_T_5_0_uop_is_sfb; // @[Decoupled.scala:51:35] wire [7:0] s0_req_0_uop_br_mask = _s0_req_T ? _s0_req_WIRE_0_uop_br_mask : _s0_req_T_5_0_uop_br_mask; // @[Decoupled.scala:51:35] wire [2:0] s0_req_0_uop_br_tag = _s0_req_T ? _s0_req_WIRE_0_uop_br_tag : _s0_req_T_5_0_uop_br_tag; // @[Decoupled.scala:51:35] wire [3:0] s0_req_0_uop_ftq_idx = _s0_req_T ? _s0_req_WIRE_0_uop_ftq_idx : _s0_req_T_5_0_uop_ftq_idx; // @[Decoupled.scala:51:35] wire s0_req_0_uop_edge_inst = _s0_req_T ? _s0_req_WIRE_0_uop_edge_inst : _s0_req_T_5_0_uop_edge_inst; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_pc_lob = _s0_req_T ? _s0_req_WIRE_0_uop_pc_lob : _s0_req_T_5_0_uop_pc_lob; // @[Decoupled.scala:51:35] wire s0_req_0_uop_taken = _s0_req_T ? _s0_req_WIRE_0_uop_taken : _s0_req_T_5_0_uop_taken; // @[Decoupled.scala:51:35] wire [19:0] s0_req_0_uop_imm_packed = _s0_req_T ? _s0_req_WIRE_0_uop_imm_packed : _s0_req_T_5_0_uop_imm_packed; // @[Decoupled.scala:51:35] wire [11:0] s0_req_0_uop_csr_addr = _s0_req_T ? _s0_req_WIRE_0_uop_csr_addr : _s0_req_T_5_0_uop_csr_addr; // @[Decoupled.scala:51:35] wire [4:0] s0_req_0_uop_rob_idx = _s0_req_T ? _s0_req_WIRE_0_uop_rob_idx : _s0_req_T_5_0_uop_rob_idx; // @[Decoupled.scala:51:35] wire [2:0] s0_req_0_uop_ldq_idx = _s0_req_T ? _s0_req_WIRE_0_uop_ldq_idx : _s0_req_T_5_0_uop_ldq_idx; // @[Decoupled.scala:51:35] wire [2:0] s0_req_0_uop_stq_idx = _s0_req_T ? _s0_req_WIRE_0_uop_stq_idx : _s0_req_T_5_0_uop_stq_idx; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_rxq_idx = _s0_req_T ? _s0_req_WIRE_0_uop_rxq_idx : _s0_req_T_5_0_uop_rxq_idx; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_pdst = _s0_req_T ? _s0_req_WIRE_0_uop_pdst : _s0_req_T_5_0_uop_pdst; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_prs1 = _s0_req_T ? _s0_req_WIRE_0_uop_prs1 : _s0_req_T_5_0_uop_prs1; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_prs2 = _s0_req_T ? _s0_req_WIRE_0_uop_prs2 : _s0_req_T_5_0_uop_prs2; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_prs3 = _s0_req_T ? _s0_req_WIRE_0_uop_prs3 : _s0_req_T_5_0_uop_prs3; // @[Decoupled.scala:51:35] wire [3:0] s0_req_0_uop_ppred = _s0_req_T ? _s0_req_WIRE_0_uop_ppred : _s0_req_T_5_0_uop_ppred; // @[Decoupled.scala:51:35] wire s0_req_0_uop_prs1_busy = _s0_req_T ? _s0_req_WIRE_0_uop_prs1_busy : _s0_req_T_5_0_uop_prs1_busy; // @[Decoupled.scala:51:35] wire s0_req_0_uop_prs2_busy = _s0_req_T ? _s0_req_WIRE_0_uop_prs2_busy : _s0_req_T_5_0_uop_prs2_busy; // @[Decoupled.scala:51:35] wire s0_req_0_uop_prs3_busy = _s0_req_T ? _s0_req_WIRE_0_uop_prs3_busy : _s0_req_T_5_0_uop_prs3_busy; // @[Decoupled.scala:51:35] wire s0_req_0_uop_ppred_busy = _s0_req_T ? _s0_req_WIRE_0_uop_ppred_busy : _s0_req_T_5_0_uop_ppred_busy; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_stale_pdst = _s0_req_T ? _s0_req_WIRE_0_uop_stale_pdst : _s0_req_T_5_0_uop_stale_pdst; // @[Decoupled.scala:51:35] wire s0_req_0_uop_exception = _s0_req_T ? _s0_req_WIRE_0_uop_exception : _s0_req_T_5_0_uop_exception; // @[Decoupled.scala:51:35] wire [63:0] s0_req_0_uop_exc_cause = _s0_req_T ? _s0_req_WIRE_0_uop_exc_cause : _s0_req_T_5_0_uop_exc_cause; // @[Decoupled.scala:51:35] wire s0_req_0_uop_bypassable = _s0_req_T ? _s0_req_WIRE_0_uop_bypassable : _s0_req_T_5_0_uop_bypassable; // @[Decoupled.scala:51:35] wire [4:0] s0_req_0_uop_mem_cmd = _s0_req_T ? _s0_req_WIRE_0_uop_mem_cmd : _s0_req_T_5_0_uop_mem_cmd; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_mem_size = _s0_req_T ? 
_s0_req_WIRE_0_uop_mem_size : _s0_req_T_5_0_uop_mem_size; // @[Decoupled.scala:51:35] wire s0_req_0_uop_mem_signed = _s0_req_T ? _s0_req_WIRE_0_uop_mem_signed : _s0_req_T_5_0_uop_mem_signed; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_fence = _s0_req_T ? _s0_req_WIRE_0_uop_is_fence : _s0_req_T_5_0_uop_is_fence; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_fencei = _s0_req_T ? _s0_req_WIRE_0_uop_is_fencei : _s0_req_T_5_0_uop_is_fencei; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_amo = _s0_req_T ? _s0_req_WIRE_0_uop_is_amo : _s0_req_T_5_0_uop_is_amo; // @[Decoupled.scala:51:35] wire s0_req_0_uop_uses_ldq = _s0_req_T ? _s0_req_WIRE_0_uop_uses_ldq : _s0_req_T_5_0_uop_uses_ldq; // @[Decoupled.scala:51:35] wire s0_req_0_uop_uses_stq = _s0_req_T ? _s0_req_WIRE_0_uop_uses_stq : _s0_req_T_5_0_uop_uses_stq; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_sys_pc2epc = _s0_req_T ? _s0_req_WIRE_0_uop_is_sys_pc2epc : _s0_req_T_5_0_uop_is_sys_pc2epc; // @[Decoupled.scala:51:35] wire s0_req_0_uop_is_unique = _s0_req_T ? _s0_req_WIRE_0_uop_is_unique : _s0_req_T_5_0_uop_is_unique; // @[Decoupled.scala:51:35] wire s0_req_0_uop_flush_on_commit = _s0_req_T ? _s0_req_WIRE_0_uop_flush_on_commit : _s0_req_T_5_0_uop_flush_on_commit; // @[Decoupled.scala:51:35] wire s0_req_0_uop_ldst_is_rs1 = _s0_req_T ? _s0_req_WIRE_0_uop_ldst_is_rs1 : _s0_req_T_5_0_uop_ldst_is_rs1; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_ldst = _s0_req_T ? _s0_req_WIRE_0_uop_ldst : _s0_req_T_5_0_uop_ldst; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_lrs1 = _s0_req_T ? _s0_req_WIRE_0_uop_lrs1 : _s0_req_T_5_0_uop_lrs1; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_lrs2 = _s0_req_T ? _s0_req_WIRE_0_uop_lrs2 : _s0_req_T_5_0_uop_lrs2; // @[Decoupled.scala:51:35] wire [5:0] s0_req_0_uop_lrs3 = _s0_req_T ? _s0_req_WIRE_0_uop_lrs3 : _s0_req_T_5_0_uop_lrs3; // @[Decoupled.scala:51:35] wire s0_req_0_uop_ldst_val = _s0_req_T ? _s0_req_WIRE_0_uop_ldst_val : _s0_req_T_5_0_uop_ldst_val; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_dst_rtype = _s0_req_T ? _s0_req_WIRE_0_uop_dst_rtype : _s0_req_T_5_0_uop_dst_rtype; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_lrs1_rtype = _s0_req_T ? _s0_req_WIRE_0_uop_lrs1_rtype : _s0_req_T_5_0_uop_lrs1_rtype; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_lrs2_rtype = _s0_req_T ? _s0_req_WIRE_0_uop_lrs2_rtype : _s0_req_T_5_0_uop_lrs2_rtype; // @[Decoupled.scala:51:35] wire s0_req_0_uop_frs3_en = _s0_req_T ? _s0_req_WIRE_0_uop_frs3_en : _s0_req_T_5_0_uop_frs3_en; // @[Decoupled.scala:51:35] wire s0_req_0_uop_fp_val = _s0_req_T ? _s0_req_WIRE_0_uop_fp_val : _s0_req_T_5_0_uop_fp_val; // @[Decoupled.scala:51:35] wire s0_req_0_uop_fp_single = _s0_req_T ? _s0_req_WIRE_0_uop_fp_single : _s0_req_T_5_0_uop_fp_single; // @[Decoupled.scala:51:35] wire s0_req_0_uop_xcpt_pf_if = _s0_req_T ? _s0_req_WIRE_0_uop_xcpt_pf_if : _s0_req_T_5_0_uop_xcpt_pf_if; // @[Decoupled.scala:51:35] wire s0_req_0_uop_xcpt_ae_if = _s0_req_T ? _s0_req_WIRE_0_uop_xcpt_ae_if : _s0_req_T_5_0_uop_xcpt_ae_if; // @[Decoupled.scala:51:35] wire s0_req_0_uop_xcpt_ma_if = _s0_req_T ? _s0_req_WIRE_0_uop_xcpt_ma_if : _s0_req_T_5_0_uop_xcpt_ma_if; // @[Decoupled.scala:51:35] wire s0_req_0_uop_bp_debug_if = _s0_req_T ? _s0_req_WIRE_0_uop_bp_debug_if : _s0_req_T_5_0_uop_bp_debug_if; // @[Decoupled.scala:51:35] wire s0_req_0_uop_bp_xcpt_if = _s0_req_T ? _s0_req_WIRE_0_uop_bp_xcpt_if : _s0_req_T_5_0_uop_bp_xcpt_if; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_debug_fsrc = _s0_req_T ? 
_s0_req_WIRE_0_uop_debug_fsrc : _s0_req_T_5_0_uop_debug_fsrc; // @[Decoupled.scala:51:35] wire [1:0] s0_req_0_uop_debug_tsrc = _s0_req_T ? _s0_req_WIRE_0_uop_debug_tsrc : _s0_req_T_5_0_uop_debug_tsrc; // @[Decoupled.scala:51:35] wire [39:0] s0_req_0_addr = _s0_req_T ? _s0_req_WIRE_0_addr : _s0_req_T_5_0_addr; // @[Decoupled.scala:51:35] wire [63:0] s0_req_0_data = _s0_req_T ? _s0_req_WIRE_0_data : _s0_req_T_5_0_data; // @[Decoupled.scala:51:35] wire s0_req_0_is_hella = _s0_req_T ? _s0_req_WIRE_0_is_hella : _s0_req_T_5_0_is_hella; // @[Decoupled.scala:51:35] wire [2:0] _s0_type_T_2 = _s0_type_T_1 ? 3'h3 : 3'h0; // @[Decoupled.scala:51:35] wire [2:0] _s0_type_T_3 = _s0_type_T_2; // @[dcache.scala:591:21, :592:21] wire [2:0] _s0_type_T_4 = prober_fire ? 3'h1 : _s0_type_T_3; // @[Decoupled.scala:51:35] wire [2:0] _s0_type_T_5 = wb_fire ? 3'h2 : _s0_type_T_4; // @[dcache.scala:531:38, :589:21, :590:21] wire [2:0] s0_type = _s0_type_T ? 3'h4 : _s0_type_T_5; // @[Decoupled.scala:51:35] wire _s0_send_resp_or_nack_T_2 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h0; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_3 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h10; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_4 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h6; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_5 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h7; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_6 = _s0_send_resp_or_nack_T_2 | _s0_send_resp_or_nack_T_3; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_7 = _s0_send_resp_or_nack_T_6 | _s0_send_resp_or_nack_T_4; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_8 = _s0_send_resp_or_nack_T_7 | _s0_send_resp_or_nack_T_5; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_9 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h4; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_10 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h9; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_11 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hA; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_12 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hB; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_13 = _s0_send_resp_or_nack_T_9 | _s0_send_resp_or_nack_T_10; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_14 = _s0_send_resp_or_nack_T_13 | _s0_send_resp_or_nack_T_11; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_15 = _s0_send_resp_or_nack_T_14 | _s0_send_resp_or_nack_T_12; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_16 = _mshrs_io_replay_bits_uop_mem_cmd == 5'h8; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_17 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hC; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_18 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hD; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_19 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hE; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_20 = _mshrs_io_replay_bits_uop_mem_cmd == 5'hF; // @[package.scala:16:47] wire _s0_send_resp_or_nack_T_21 = _s0_send_resp_or_nack_T_16 | _s0_send_resp_or_nack_T_17; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_22 = _s0_send_resp_or_nack_T_21 | _s0_send_resp_or_nack_T_18; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_23 = _s0_send_resp_or_nack_T_22 | _s0_send_resp_or_nack_T_19; // @[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_24 = _s0_send_resp_or_nack_T_23 | _s0_send_resp_or_nack_T_20; // 
@[package.scala:16:47, :81:59] wire _s0_send_resp_or_nack_T_25 = _s0_send_resp_or_nack_T_15 | _s0_send_resp_or_nack_T_24; // @[package.scala:81:59] wire _s0_send_resp_or_nack_T_26 = _s0_send_resp_or_nack_T_8 | _s0_send_resp_or_nack_T_25; // @[package.scala:81:59] wire _s0_send_resp_or_nack_T_27 = _s0_send_resp_or_nack_T_1 & _s0_send_resp_or_nack_T_26; // @[Decoupled.scala:51:35] wire _s0_send_resp_or_nack_T_28 = _s0_send_resp_or_nack_T_27; // @[dcache.scala:597:{16,38}] wire _s0_send_resp_or_nack_T_29 = _s0_send_resp_or_nack_T_28; // @[dcache.scala:597:{16,117}] wire _s0_send_resp_or_nack_WIRE_0 = _s0_send_resp_or_nack_T_29; // @[dcache.scala:597:{12,117}] wire s0_send_resp_or_nack_0 = _s0_send_resp_or_nack_T ? s0_valid_0 : _s0_send_resp_or_nack_WIRE_0; // @[Decoupled.scala:51:35] reg [6:0] s1_req_0_uop_uopc; // @[dcache.scala:600:32] reg [31:0] s1_req_0_uop_inst; // @[dcache.scala:600:32] reg [31:0] s1_req_0_uop_debug_inst; // @[dcache.scala:600:32] reg s1_req_0_uop_is_rvc; // @[dcache.scala:600:32] reg [39:0] s1_req_0_uop_debug_pc; // @[dcache.scala:600:32] reg [2:0] s1_req_0_uop_iq_type; // @[dcache.scala:600:32] reg [9:0] s1_req_0_uop_fu_code; // @[dcache.scala:600:32] reg [3:0] s1_req_0_uop_ctrl_br_type; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_ctrl_op1_sel; // @[dcache.scala:600:32] reg [2:0] s1_req_0_uop_ctrl_op2_sel; // @[dcache.scala:600:32] reg [2:0] s1_req_0_uop_ctrl_imm_sel; // @[dcache.scala:600:32] reg [4:0] s1_req_0_uop_ctrl_op_fcn; // @[dcache.scala:600:32] reg s1_req_0_uop_ctrl_fcn_dw; // @[dcache.scala:600:32] reg [2:0] s1_req_0_uop_ctrl_csr_cmd; // @[dcache.scala:600:32] reg s1_req_0_uop_ctrl_is_load; // @[dcache.scala:600:32] reg s1_req_0_uop_ctrl_is_sta; // @[dcache.scala:600:32] reg s1_req_0_uop_ctrl_is_std; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_iw_state; // @[dcache.scala:600:32] reg s1_req_0_uop_iw_p1_poisoned; // @[dcache.scala:600:32] reg s1_req_0_uop_iw_p2_poisoned; // @[dcache.scala:600:32] reg s1_req_0_uop_is_br; // @[dcache.scala:600:32] reg s1_req_0_uop_is_jalr; // @[dcache.scala:600:32] reg s1_req_0_uop_is_jal; // @[dcache.scala:600:32] reg s1_req_0_uop_is_sfb; // @[dcache.scala:600:32] reg [7:0] s1_req_0_uop_br_mask; // @[dcache.scala:600:32] reg [2:0] s1_req_0_uop_br_tag; // @[dcache.scala:600:32] reg [3:0] s1_req_0_uop_ftq_idx; // @[dcache.scala:600:32] reg s1_req_0_uop_edge_inst; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_pc_lob; // @[dcache.scala:600:32] reg s1_req_0_uop_taken; // @[dcache.scala:600:32] reg [19:0] s1_req_0_uop_imm_packed; // @[dcache.scala:600:32] reg [11:0] s1_req_0_uop_csr_addr; // @[dcache.scala:600:32] reg [4:0] s1_req_0_uop_rob_idx; // @[dcache.scala:600:32] reg [2:0] s1_req_0_uop_ldq_idx; // @[dcache.scala:600:32] reg [2:0] s1_req_0_uop_stq_idx; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_rxq_idx; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_pdst; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_prs1; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_prs2; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_prs3; // @[dcache.scala:600:32] reg [3:0] s1_req_0_uop_ppred; // @[dcache.scala:600:32] reg s1_req_0_uop_prs1_busy; // @[dcache.scala:600:32] reg s1_req_0_uop_prs2_busy; // @[dcache.scala:600:32] reg s1_req_0_uop_prs3_busy; // @[dcache.scala:600:32] reg s1_req_0_uop_ppred_busy; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_stale_pdst; // @[dcache.scala:600:32] reg s1_req_0_uop_exception; // @[dcache.scala:600:32] reg [63:0] s1_req_0_uop_exc_cause; // @[dcache.scala:600:32] reg 
s1_req_0_uop_bypassable; // @[dcache.scala:600:32] reg [4:0] s1_req_0_uop_mem_cmd; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_mem_size; // @[dcache.scala:600:32] reg s1_req_0_uop_mem_signed; // @[dcache.scala:600:32] reg s1_req_0_uop_is_fence; // @[dcache.scala:600:32] reg s1_req_0_uop_is_fencei; // @[dcache.scala:600:32] reg s1_req_0_uop_is_amo; // @[dcache.scala:600:32] reg s1_req_0_uop_uses_ldq; // @[dcache.scala:600:32] reg s1_req_0_uop_uses_stq; // @[dcache.scala:600:32] reg s1_req_0_uop_is_sys_pc2epc; // @[dcache.scala:600:32] reg s1_req_0_uop_is_unique; // @[dcache.scala:600:32] reg s1_req_0_uop_flush_on_commit; // @[dcache.scala:600:32] reg s1_req_0_uop_ldst_is_rs1; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_ldst; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_lrs1; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_lrs2; // @[dcache.scala:600:32] reg [5:0] s1_req_0_uop_lrs3; // @[dcache.scala:600:32] reg s1_req_0_uop_ldst_val; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_dst_rtype; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_lrs1_rtype; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_lrs2_rtype; // @[dcache.scala:600:32] reg s1_req_0_uop_frs3_en; // @[dcache.scala:600:32] reg s1_req_0_uop_fp_val; // @[dcache.scala:600:32] reg s1_req_0_uop_fp_single; // @[dcache.scala:600:32] reg s1_req_0_uop_xcpt_pf_if; // @[dcache.scala:600:32] reg s1_req_0_uop_xcpt_ae_if; // @[dcache.scala:600:32] reg s1_req_0_uop_xcpt_ma_if; // @[dcache.scala:600:32] reg s1_req_0_uop_bp_debug_if; // @[dcache.scala:600:32] reg s1_req_0_uop_bp_xcpt_if; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_debug_fsrc; // @[dcache.scala:600:32] reg [1:0] s1_req_0_uop_debug_tsrc; // @[dcache.scala:600:32] reg [39:0] s1_req_0_addr; // @[dcache.scala:600:32] reg [63:0] s1_req_0_data; // @[dcache.scala:600:32] reg s1_req_0_is_hella; // @[dcache.scala:600:32] wire [7:0] _s1_req_0_uop_br_mask_T = ~io_lsu_brupdate_b1_resolve_mask_0; // @[util.scala:85:27] wire [7:0] _s1_req_0_uop_br_mask_T_1 = s0_req_0_uop_br_mask & _s1_req_0_uop_br_mask_T; // @[util.scala:85:{25,27}] wire _s2_store_failed_T_2; // @[dcache.scala:742:67] wire s2_store_failed; // @[dcache.scala:603:29] wire [7:0] _s1_valid_T = io_lsu_brupdate_b1_mispredict_mask_0 & s0_req_0_uop_br_mask; // @[util.scala:118:51] wire _s1_valid_T_1 = |_s1_valid_T; // @[util.scala:118:{51,59}] wire _s1_valid_T_2 = ~_s1_valid_T_1; // @[util.scala:118:59] wire _s1_valid_T_3 = s0_valid_0 & _s1_valid_T_2; // @[dcache.scala:579:21, :605:74, :606:26] wire _s1_valid_T_4 = io_lsu_exception_0 & s0_req_0_uop_uses_ldq; // @[dcache.scala:413:7, :582:21, :607:45] wire _s1_valid_T_5 = ~_s1_valid_T_4; // @[dcache.scala:607:{26,45}] wire _s1_valid_T_6 = _s1_valid_T_3 & _s1_valid_T_5; // @[dcache.scala:605:74, :606:76, :607:26] wire _s1_valid_T_8 = s2_store_failed & _s1_valid_T_7; // @[Decoupled.scala:51:35] wire _s1_valid_T_9 = _s1_valid_T_8 & s0_req_0_uop_uses_stq; // @[dcache.scala:582:21, :608:{44,63}] wire _s1_valid_T_10 = ~_s1_valid_T_9; // @[dcache.scala:608:{26,63}] wire _s1_valid_T_11 = _s1_valid_T_6 & _s1_valid_T_10; // @[dcache.scala:606:76, :607:74, :608:26] reg s1_valid_REG; // @[dcache.scala:605:25] wire s1_valid_0 = s1_valid_REG; // @[dcache.scala:427:49, :605:25] reg REG; // @[dcache.scala:611:43] reg REG_1; // @[dcache.scala:611:72] wire [5:0] _s1_nack_T = s1_req_0_addr[11:6]; // @[dcache.scala:600:32, :613:43] wire [5:0] _s1_wb_idx_matches_T = s1_req_0_addr[11:6]; // @[dcache.scala:600:32, :613:43, :630:52] wire _s1_nack_T_1 = _s1_nack_T == 
_prober_io_meta_write_bits_idx; // @[dcache.scala:432:22, :613:{43,59}] wire _s1_nack_T_2 = ~_prober_io_req_ready; // @[dcache.scala:432:22, :613:96] wire s1_nack_0 = _s1_nack_T_1 & _s1_nack_T_2; // @[dcache.scala:613:{59,93,96}] wire _s2_nack_hit_WIRE_0 = s1_nack_0; // @[dcache.scala:613:93, :722:39] reg s1_send_resp_or_nack_0; // @[dcache.scala:614:37] reg [2:0] s1_type; // @[dcache.scala:615:32] reg [3:0] s1_mshr_meta_read_way_en; // @[dcache.scala:617:41] reg [3:0] s1_replay_way_en; // @[dcache.scala:618:41] reg [3:0] s1_wb_way_en; // @[dcache.scala:619:41] wire [27:0] _s1_tag_eq_way_T = s1_req_0_addr[39:12]; // @[dcache.scala:600:32, :623:95] wire [27:0] _s1_tag_eq_way_T_2 = s1_req_0_addr[39:12]; // @[dcache.scala:600:32, :623:95] wire [27:0] _s1_tag_eq_way_T_4 = s1_req_0_addr[39:12]; // @[dcache.scala:600:32, :623:95] wire [27:0] _s1_tag_eq_way_T_6 = s1_req_0_addr[39:12]; // @[dcache.scala:600:32, :623:95] wire _s1_tag_eq_way_T_1 = {8'h0, _meta_0_io_resp_0_tag} == _s1_tag_eq_way_T; // @[dcache.scala:442:41, :623:{79,95}] wire _s1_tag_eq_way_WIRE_0 = _s1_tag_eq_way_T_1; // @[dcache.scala:622:47, :623:79] wire _s1_tag_eq_way_T_3 = {8'h0, _meta_0_io_resp_1_tag} == _s1_tag_eq_way_T_2; // @[dcache.scala:442:41, :623:{79,95}] wire _s1_tag_eq_way_WIRE_1 = _s1_tag_eq_way_T_3; // @[dcache.scala:622:47, :623:79] wire _s1_tag_eq_way_T_5 = {8'h0, _meta_0_io_resp_2_tag} == _s1_tag_eq_way_T_4; // @[dcache.scala:442:41, :623:{79,95}] wire _s1_tag_eq_way_WIRE_2 = _s1_tag_eq_way_T_5; // @[dcache.scala:622:47, :623:79] wire _s1_tag_eq_way_T_7 = {8'h0, _meta_0_io_resp_3_tag} == _s1_tag_eq_way_T_6; // @[dcache.scala:442:41, :623:{79,95}] wire _s1_tag_eq_way_WIRE_3 = _s1_tag_eq_way_T_7; // @[dcache.scala:622:47, :623:79] wire [1:0] s1_tag_eq_way_lo = {_s1_tag_eq_way_WIRE_1, _s1_tag_eq_way_WIRE_0}; // @[dcache.scala:622:47, :623:110] wire [1:0] s1_tag_eq_way_hi = {_s1_tag_eq_way_WIRE_3, _s1_tag_eq_way_WIRE_2}; // @[dcache.scala:622:47, :623:110] wire [3:0] _s1_tag_eq_way_T_8 = {s1_tag_eq_way_hi, s1_tag_eq_way_lo}; // @[dcache.scala:623:110] wire [3:0] s1_tag_eq_way_0 = _s1_tag_eq_way_T_8; // @[dcache.scala:427:49, :623:110] wire _s1_tag_match_way_T = s1_type == 3'h0; // @[dcache.scala:615:32, :625:38] wire _s1_tag_match_way_T_1 = s1_type == 3'h2; // @[dcache.scala:615:32, :626:38] wire _s1_tag_match_way_T_2 = s1_type == 3'h3; // @[dcache.scala:615:32, :627:38] wire _s1_tag_match_way_T_3 = s1_tag_eq_way_0[0]; // @[dcache.scala:427:49, :628:63] wire _s1_tag_match_way_T_4 = |_meta_0_io_resp_0_coh_state; // @[Metadata.scala:50:45] wire _s1_tag_match_way_T_5 = _s1_tag_match_way_T_3 & _s1_tag_match_way_T_4; // @[Metadata.scala:50:45] wire _s1_tag_match_way_WIRE_0 = _s1_tag_match_way_T_5; // @[dcache.scala:622:47, :628:67] wire _s1_tag_match_way_T_6 = s1_tag_eq_way_0[1]; // @[dcache.scala:427:49, :628:63] wire _s1_tag_match_way_T_7 = |_meta_0_io_resp_1_coh_state; // @[Metadata.scala:50:45] wire _s1_tag_match_way_T_8 = _s1_tag_match_way_T_6 & _s1_tag_match_way_T_7; // @[Metadata.scala:50:45] wire _s1_tag_match_way_WIRE_1 = _s1_tag_match_way_T_8; // @[dcache.scala:622:47, :628:67] wire _s1_tag_match_way_T_9 = s1_tag_eq_way_0[2]; // @[dcache.scala:427:49, :628:63] wire _s1_tag_match_way_T_10 = |_meta_0_io_resp_2_coh_state; // @[Metadata.scala:50:45] wire _s1_tag_match_way_T_11 = _s1_tag_match_way_T_9 & _s1_tag_match_way_T_10; // @[Metadata.scala:50:45] wire _s1_tag_match_way_WIRE_2 = _s1_tag_match_way_T_11; // @[dcache.scala:622:47, :628:67] wire _s1_tag_match_way_T_12 = s1_tag_eq_way_0[3]; // 
@[dcache.scala:427:49, :628:63] wire _s1_tag_match_way_T_13 = |_meta_0_io_resp_3_coh_state; // @[Metadata.scala:50:45] wire _s1_tag_match_way_T_14 = _s1_tag_match_way_T_12 & _s1_tag_match_way_T_13; // @[Metadata.scala:50:45] wire _s1_tag_match_way_WIRE_3 = _s1_tag_match_way_T_14; // @[dcache.scala:622:47, :628:67] wire [1:0] s1_tag_match_way_lo = {_s1_tag_match_way_WIRE_1, _s1_tag_match_way_WIRE_0}; // @[dcache.scala:622:47, :628:104] wire [1:0] s1_tag_match_way_hi = {_s1_tag_match_way_WIRE_3, _s1_tag_match_way_WIRE_2}; // @[dcache.scala:622:47, :628:104] wire [3:0] _s1_tag_match_way_T_15 = {s1_tag_match_way_hi, s1_tag_match_way_lo}; // @[dcache.scala:628:104] wire [3:0] _s1_tag_match_way_T_16 = _s1_tag_match_way_T_2 ? s1_mshr_meta_read_way_en : _s1_tag_match_way_T_15; // @[dcache.scala:617:41, :627:{29,38}, :628:104] wire [3:0] _s1_tag_match_way_T_17 = _s1_tag_match_way_T_1 ? s1_wb_way_en : _s1_tag_match_way_T_16; // @[dcache.scala:619:41, :626:{29,38}, :627:29] wire [3:0] _s1_tag_match_way_T_18 = _s1_tag_match_way_T ? s1_replay_way_en : _s1_tag_match_way_T_17; // @[dcache.scala:618:41, :625:{29,38}, :626:29] wire [3:0] s1_tag_match_way_0 = _s1_tag_match_way_T_18; // @[dcache.scala:427:49, :625:29] wire _s1_wb_idx_matches_T_1 = _s1_wb_idx_matches_T == _wb_io_idx_bits; // @[dcache.scala:431:18, :630:{52,79}] wire _s1_wb_idx_matches_T_2 = _s1_wb_idx_matches_T_1 & _wb_io_idx_valid; // @[dcache.scala:431:18, :630:{79,99}] wire s1_wb_idx_matches_0 = _s1_wb_idx_matches_T_2; // @[dcache.scala:427:49, :630:99] reg [6:0] s2_req_0_uop_uopc; // @[dcache.scala:632:25] wire [6:0] cache_resp_0_bits_uop_uopc = s2_req_0_uop_uopc; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_uopc = s2_req_0_uop_uopc; // @[util.scala:101:23] reg [31:0] s2_req_0_uop_inst; // @[dcache.scala:632:25] wire [31:0] cache_resp_0_bits_uop_inst = s2_req_0_uop_inst; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_inst = s2_req_0_uop_inst; // @[util.scala:101:23] reg [31:0] s2_req_0_uop_debug_inst; // @[dcache.scala:632:25] wire [31:0] cache_resp_0_bits_uop_debug_inst = s2_req_0_uop_debug_inst; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_debug_inst = s2_req_0_uop_debug_inst; // @[util.scala:101:23] reg s2_req_0_uop_is_rvc; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_rvc = s2_req_0_uop_is_rvc; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_rvc = s2_req_0_uop_is_rvc; // @[util.scala:101:23] reg [39:0] s2_req_0_uop_debug_pc; // @[dcache.scala:632:25] wire [39:0] cache_resp_0_bits_uop_debug_pc = s2_req_0_uop_debug_pc; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_debug_pc = s2_req_0_uop_debug_pc; // @[util.scala:101:23] reg [2:0] s2_req_0_uop_iq_type; // @[dcache.scala:632:25] wire [2:0] cache_resp_0_bits_uop_iq_type = s2_req_0_uop_iq_type; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_iq_type = s2_req_0_uop_iq_type; // @[util.scala:101:23] reg [9:0] s2_req_0_uop_fu_code; // @[dcache.scala:632:25] wire [9:0] cache_resp_0_bits_uop_fu_code = s2_req_0_uop_fu_code; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_fu_code = s2_req_0_uop_fu_code; // @[util.scala:101:23] reg [3:0] s2_req_0_uop_ctrl_br_type; // @[dcache.scala:632:25] wire [3:0] cache_resp_0_bits_uop_ctrl_br_type = s2_req_0_uop_ctrl_br_type; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_br_type = s2_req_0_uop_ctrl_br_type; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_ctrl_op1_sel; 
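// Descriptive note (added): stage-2 request register s2_req_0_* (dcache.scala:632:25).
// Each registered micro-op field fans out combinationally to the cache response bundle
// (cache_resp_0_bits_uop_*) and to the LSU nack output (io_lsu_nack_0_bits_out_uop_*).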
// @[dcache.scala:632:25] wire [1:0] cache_resp_0_bits_uop_ctrl_op1_sel = s2_req_0_uop_ctrl_op1_sel; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_op1_sel = s2_req_0_uop_ctrl_op1_sel; // @[util.scala:101:23] reg [2:0] s2_req_0_uop_ctrl_op2_sel; // @[dcache.scala:632:25] wire [2:0] cache_resp_0_bits_uop_ctrl_op2_sel = s2_req_0_uop_ctrl_op2_sel; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_op2_sel = s2_req_0_uop_ctrl_op2_sel; // @[util.scala:101:23] reg [2:0] s2_req_0_uop_ctrl_imm_sel; // @[dcache.scala:632:25] wire [2:0] cache_resp_0_bits_uop_ctrl_imm_sel = s2_req_0_uop_ctrl_imm_sel; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_imm_sel = s2_req_0_uop_ctrl_imm_sel; // @[util.scala:101:23] reg [4:0] s2_req_0_uop_ctrl_op_fcn; // @[dcache.scala:632:25] wire [4:0] cache_resp_0_bits_uop_ctrl_op_fcn = s2_req_0_uop_ctrl_op_fcn; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_op_fcn = s2_req_0_uop_ctrl_op_fcn; // @[util.scala:101:23] reg s2_req_0_uop_ctrl_fcn_dw; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_ctrl_fcn_dw = s2_req_0_uop_ctrl_fcn_dw; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_fcn_dw = s2_req_0_uop_ctrl_fcn_dw; // @[util.scala:101:23] reg [2:0] s2_req_0_uop_ctrl_csr_cmd; // @[dcache.scala:632:25] wire [2:0] cache_resp_0_bits_uop_ctrl_csr_cmd = s2_req_0_uop_ctrl_csr_cmd; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_csr_cmd = s2_req_0_uop_ctrl_csr_cmd; // @[util.scala:101:23] reg s2_req_0_uop_ctrl_is_load; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_ctrl_is_load = s2_req_0_uop_ctrl_is_load; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_is_load = s2_req_0_uop_ctrl_is_load; // @[util.scala:101:23] reg s2_req_0_uop_ctrl_is_sta; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_ctrl_is_sta = s2_req_0_uop_ctrl_is_sta; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_is_sta = s2_req_0_uop_ctrl_is_sta; // @[util.scala:101:23] reg s2_req_0_uop_ctrl_is_std; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_ctrl_is_std = s2_req_0_uop_ctrl_is_std; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ctrl_is_std = s2_req_0_uop_ctrl_is_std; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_iw_state; // @[dcache.scala:632:25] wire [1:0] cache_resp_0_bits_uop_iw_state = s2_req_0_uop_iw_state; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_iw_state = s2_req_0_uop_iw_state; // @[util.scala:101:23] reg s2_req_0_uop_iw_p1_poisoned; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_iw_p1_poisoned = s2_req_0_uop_iw_p1_poisoned; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_iw_p1_poisoned = s2_req_0_uop_iw_p1_poisoned; // @[util.scala:101:23] reg s2_req_0_uop_iw_p2_poisoned; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_iw_p2_poisoned = s2_req_0_uop_iw_p2_poisoned; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_iw_p2_poisoned = s2_req_0_uop_iw_p2_poisoned; // @[util.scala:101:23] reg s2_req_0_uop_is_br; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_br = s2_req_0_uop_is_br; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_br = s2_req_0_uop_is_br; // @[util.scala:101:23] reg s2_req_0_uop_is_jalr; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_jalr = s2_req_0_uop_is_jalr; // @[dcache.scala:632:25, :833:26] assign 
io_lsu_nack_0_bits_out_uop_is_jalr = s2_req_0_uop_is_jalr; // @[util.scala:101:23] reg s2_req_0_uop_is_jal; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_jal = s2_req_0_uop_is_jal; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_jal = s2_req_0_uop_is_jal; // @[util.scala:101:23] reg s2_req_0_uop_is_sfb; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_sfb = s2_req_0_uop_is_sfb; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_sfb = s2_req_0_uop_is_sfb; // @[util.scala:101:23] reg [7:0] s2_req_0_uop_br_mask; // @[dcache.scala:632:25] wire [7:0] cache_resp_0_bits_uop_br_mask = s2_req_0_uop_br_mask; // @[dcache.scala:632:25, :833:26] reg [2:0] s2_req_0_uop_br_tag; // @[dcache.scala:632:25] wire [2:0] cache_resp_0_bits_uop_br_tag = s2_req_0_uop_br_tag; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_br_tag = s2_req_0_uop_br_tag; // @[util.scala:101:23] reg [3:0] s2_req_0_uop_ftq_idx; // @[dcache.scala:632:25] wire [3:0] cache_resp_0_bits_uop_ftq_idx = s2_req_0_uop_ftq_idx; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ftq_idx = s2_req_0_uop_ftq_idx; // @[util.scala:101:23] reg s2_req_0_uop_edge_inst; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_edge_inst = s2_req_0_uop_edge_inst; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_edge_inst = s2_req_0_uop_edge_inst; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_pc_lob; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_pc_lob = s2_req_0_uop_pc_lob; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_pc_lob = s2_req_0_uop_pc_lob; // @[util.scala:101:23] reg s2_req_0_uop_taken; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_taken = s2_req_0_uop_taken; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_taken = s2_req_0_uop_taken; // @[util.scala:101:23] reg [19:0] s2_req_0_uop_imm_packed; // @[dcache.scala:632:25] wire [19:0] cache_resp_0_bits_uop_imm_packed = s2_req_0_uop_imm_packed; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_imm_packed = s2_req_0_uop_imm_packed; // @[util.scala:101:23] reg [11:0] s2_req_0_uop_csr_addr; // @[dcache.scala:632:25] wire [11:0] cache_resp_0_bits_uop_csr_addr = s2_req_0_uop_csr_addr; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_csr_addr = s2_req_0_uop_csr_addr; // @[util.scala:101:23] reg [4:0] s2_req_0_uop_rob_idx; // @[dcache.scala:632:25] wire [4:0] cache_resp_0_bits_uop_rob_idx = s2_req_0_uop_rob_idx; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_rob_idx = s2_req_0_uop_rob_idx; // @[util.scala:101:23] reg [2:0] s2_req_0_uop_ldq_idx; // @[dcache.scala:632:25] wire [2:0] cache_resp_0_bits_uop_ldq_idx = s2_req_0_uop_ldq_idx; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ldq_idx = s2_req_0_uop_ldq_idx; // @[util.scala:101:23] reg [2:0] s2_req_0_uop_stq_idx; // @[dcache.scala:632:25] wire [2:0] cache_resp_0_bits_uop_stq_idx = s2_req_0_uop_stq_idx; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_stq_idx = s2_req_0_uop_stq_idx; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_rxq_idx; // @[dcache.scala:632:25] wire [1:0] cache_resp_0_bits_uop_rxq_idx = s2_req_0_uop_rxq_idx; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_rxq_idx = s2_req_0_uop_rxq_idx; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_pdst; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_pdst = s2_req_0_uop_pdst; // 
@[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_pdst = s2_req_0_uop_pdst; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_prs1; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_prs1 = s2_req_0_uop_prs1; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_prs1 = s2_req_0_uop_prs1; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_prs2; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_prs2 = s2_req_0_uop_prs2; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_prs2 = s2_req_0_uop_prs2; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_prs3; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_prs3 = s2_req_0_uop_prs3; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_prs3 = s2_req_0_uop_prs3; // @[util.scala:101:23] reg [3:0] s2_req_0_uop_ppred; // @[dcache.scala:632:25] wire [3:0] cache_resp_0_bits_uop_ppred = s2_req_0_uop_ppred; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ppred = s2_req_0_uop_ppred; // @[util.scala:101:23] reg s2_req_0_uop_prs1_busy; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_prs1_busy = s2_req_0_uop_prs1_busy; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_prs1_busy = s2_req_0_uop_prs1_busy; // @[util.scala:101:23] reg s2_req_0_uop_prs2_busy; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_prs2_busy = s2_req_0_uop_prs2_busy; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_prs2_busy = s2_req_0_uop_prs2_busy; // @[util.scala:101:23] reg s2_req_0_uop_prs3_busy; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_prs3_busy = s2_req_0_uop_prs3_busy; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_prs3_busy = s2_req_0_uop_prs3_busy; // @[util.scala:101:23] reg s2_req_0_uop_ppred_busy; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_ppred_busy = s2_req_0_uop_ppred_busy; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ppred_busy = s2_req_0_uop_ppred_busy; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_stale_pdst; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_stale_pdst = s2_req_0_uop_stale_pdst; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_stale_pdst = s2_req_0_uop_stale_pdst; // @[util.scala:101:23] reg s2_req_0_uop_exception; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_exception = s2_req_0_uop_exception; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_exception = s2_req_0_uop_exception; // @[util.scala:101:23] reg [63:0] s2_req_0_uop_exc_cause; // @[dcache.scala:632:25] wire [63:0] cache_resp_0_bits_uop_exc_cause = s2_req_0_uop_exc_cause; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_exc_cause = s2_req_0_uop_exc_cause; // @[util.scala:101:23] reg s2_req_0_uop_bypassable; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_bypassable = s2_req_0_uop_bypassable; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_bypassable = s2_req_0_uop_bypassable; // @[util.scala:101:23] reg [4:0] s2_req_0_uop_mem_cmd; // @[dcache.scala:632:25] wire [4:0] cache_resp_0_bits_uop_mem_cmd = s2_req_0_uop_mem_cmd; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_mem_cmd = s2_req_0_uop_mem_cmd; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_mem_size; // @[dcache.scala:632:25] wire [1:0] size = s2_req_0_uop_mem_size; // @[AMOALU.scala:11:18] wire [1:0] cache_resp_0_bits_uop_mem_size = s2_req_0_uop_mem_size; // @[dcache.scala:632:25, :833:26] assign 
io_lsu_nack_0_bits_out_uop_mem_size = s2_req_0_uop_mem_size; // @[util.scala:101:23] wire [1:0] amoalu_io_mask_size = s2_req_0_uop_mem_size; // @[AMOALU.scala:11:18] reg s2_req_0_uop_mem_signed; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_mem_signed = s2_req_0_uop_mem_signed; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_mem_signed = s2_req_0_uop_mem_signed; // @[util.scala:101:23] reg s2_req_0_uop_is_fence; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_fence = s2_req_0_uop_is_fence; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_fence = s2_req_0_uop_is_fence; // @[util.scala:101:23] reg s2_req_0_uop_is_fencei; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_fencei = s2_req_0_uop_is_fencei; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_fencei = s2_req_0_uop_is_fencei; // @[util.scala:101:23] reg s2_req_0_uop_is_amo; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_amo = s2_req_0_uop_is_amo; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_amo = s2_req_0_uop_is_amo; // @[util.scala:101:23] reg s2_req_0_uop_uses_ldq; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_uses_ldq = s2_req_0_uop_uses_ldq; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_uses_ldq = s2_req_0_uop_uses_ldq; // @[util.scala:101:23] reg s2_req_0_uop_uses_stq; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_uses_stq = s2_req_0_uop_uses_stq; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_uses_stq = s2_req_0_uop_uses_stq; // @[util.scala:101:23] reg s2_req_0_uop_is_sys_pc2epc; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_sys_pc2epc = s2_req_0_uop_is_sys_pc2epc; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_sys_pc2epc = s2_req_0_uop_is_sys_pc2epc; // @[util.scala:101:23] reg s2_req_0_uop_is_unique; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_is_unique = s2_req_0_uop_is_unique; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_is_unique = s2_req_0_uop_is_unique; // @[util.scala:101:23] reg s2_req_0_uop_flush_on_commit; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_flush_on_commit = s2_req_0_uop_flush_on_commit; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_flush_on_commit = s2_req_0_uop_flush_on_commit; // @[util.scala:101:23] reg s2_req_0_uop_ldst_is_rs1; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_ldst_is_rs1 = s2_req_0_uop_ldst_is_rs1; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ldst_is_rs1 = s2_req_0_uop_ldst_is_rs1; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_ldst; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_ldst = s2_req_0_uop_ldst; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ldst = s2_req_0_uop_ldst; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_lrs1; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_lrs1 = s2_req_0_uop_lrs1; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_lrs1 = s2_req_0_uop_lrs1; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_lrs2; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_lrs2 = s2_req_0_uop_lrs2; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_lrs2 = s2_req_0_uop_lrs2; // @[util.scala:101:23] reg [5:0] s2_req_0_uop_lrs3; // @[dcache.scala:632:25] wire [5:0] cache_resp_0_bits_uop_lrs3 = s2_req_0_uop_lrs3; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_lrs3 = 
s2_req_0_uop_lrs3; // @[util.scala:101:23] reg s2_req_0_uop_ldst_val; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_ldst_val = s2_req_0_uop_ldst_val; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_ldst_val = s2_req_0_uop_ldst_val; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_dst_rtype; // @[dcache.scala:632:25] wire [1:0] cache_resp_0_bits_uop_dst_rtype = s2_req_0_uop_dst_rtype; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_dst_rtype = s2_req_0_uop_dst_rtype; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_lrs1_rtype; // @[dcache.scala:632:25] wire [1:0] cache_resp_0_bits_uop_lrs1_rtype = s2_req_0_uop_lrs1_rtype; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_lrs1_rtype = s2_req_0_uop_lrs1_rtype; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_lrs2_rtype; // @[dcache.scala:632:25] wire [1:0] cache_resp_0_bits_uop_lrs2_rtype = s2_req_0_uop_lrs2_rtype; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_lrs2_rtype = s2_req_0_uop_lrs2_rtype; // @[util.scala:101:23] reg s2_req_0_uop_frs3_en; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_frs3_en = s2_req_0_uop_frs3_en; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_frs3_en = s2_req_0_uop_frs3_en; // @[util.scala:101:23] reg s2_req_0_uop_fp_val; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_fp_val = s2_req_0_uop_fp_val; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_fp_val = s2_req_0_uop_fp_val; // @[util.scala:101:23] reg s2_req_0_uop_fp_single; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_fp_single = s2_req_0_uop_fp_single; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_fp_single = s2_req_0_uop_fp_single; // @[util.scala:101:23] reg s2_req_0_uop_xcpt_pf_if; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_xcpt_pf_if = s2_req_0_uop_xcpt_pf_if; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_xcpt_pf_if = s2_req_0_uop_xcpt_pf_if; // @[util.scala:101:23] reg s2_req_0_uop_xcpt_ae_if; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_xcpt_ae_if = s2_req_0_uop_xcpt_ae_if; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_xcpt_ae_if = s2_req_0_uop_xcpt_ae_if; // @[util.scala:101:23] reg s2_req_0_uop_xcpt_ma_if; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_xcpt_ma_if = s2_req_0_uop_xcpt_ma_if; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_xcpt_ma_if = s2_req_0_uop_xcpt_ma_if; // @[util.scala:101:23] reg s2_req_0_uop_bp_debug_if; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_bp_debug_if = s2_req_0_uop_bp_debug_if; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_bp_debug_if = s2_req_0_uop_bp_debug_if; // @[util.scala:101:23] reg s2_req_0_uop_bp_xcpt_if; // @[dcache.scala:632:25] wire cache_resp_0_bits_uop_bp_xcpt_if = s2_req_0_uop_bp_xcpt_if; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_bp_xcpt_if = s2_req_0_uop_bp_xcpt_if; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_debug_fsrc; // @[dcache.scala:632:25] wire [1:0] cache_resp_0_bits_uop_debug_fsrc = s2_req_0_uop_debug_fsrc; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_debug_fsrc = s2_req_0_uop_debug_fsrc; // @[util.scala:101:23] reg [1:0] s2_req_0_uop_debug_tsrc; // @[dcache.scala:632:25] wire [1:0] cache_resp_0_bits_uop_debug_tsrc = s2_req_0_uop_debug_tsrc; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_uop_debug_tsrc = s2_req_0_uop_debug_tsrc; // 
@[util.scala:101:23] reg [39:0] s2_req_0_addr; // @[dcache.scala:632:25] assign io_lsu_nack_0_bits_out_addr = s2_req_0_addr; // @[util.scala:101:23] reg [63:0] s2_req_0_data; // @[dcache.scala:632:25] assign io_lsu_nack_0_bits_out_data = s2_req_0_data; // @[util.scala:101:23] reg s2_req_0_is_hella; // @[dcache.scala:632:25] wire cache_resp_0_bits_is_hella = s2_req_0_is_hella; // @[dcache.scala:632:25, :833:26] assign io_lsu_nack_0_bits_out_is_hella = s2_req_0_is_hella; // @[util.scala:101:23] reg [2:0] s2_type; // @[dcache.scala:633:25] wire _s2_valid_T = ~io_lsu_s1_kill_0_0; // @[dcache.scala:413:7, :636:26] wire _s2_valid_T_1 = s1_valid_0 & _s2_valid_T; // @[dcache.scala:427:49, :635:39, :636:26] wire [7:0] _s2_valid_T_2 = io_lsu_brupdate_b1_mispredict_mask_0 & s1_req_0_uop_br_mask; // @[util.scala:118:51] wire _s2_valid_T_3 = |_s2_valid_T_2; // @[util.scala:118:{51,59}] wire _s2_valid_T_4 = ~_s2_valid_T_3; // @[util.scala:118:59] wire _s2_valid_T_5 = _s2_valid_T_1 & _s2_valid_T_4; // @[dcache.scala:635:39, :636:45, :637:26] wire _s2_valid_T_6 = io_lsu_exception_0 & s1_req_0_uop_uses_ldq; // @[dcache.scala:413:7, :600:32, :638:45] wire _s2_valid_T_7 = ~_s2_valid_T_6; // @[dcache.scala:638:{26,45}] wire _s2_valid_T_8 = _s2_valid_T_5 & _s2_valid_T_7; // @[dcache.scala:636:45, :637:76, :638:26] wire _s2_valid_T_9 = s1_type == 3'h4; // @[dcache.scala:615:32, :639:56] wire _s2_valid_T_10 = s2_store_failed & _s2_valid_T_9; // @[dcache.scala:603:29, :639:{44,56}] wire _s2_valid_T_11 = _s2_valid_T_10 & s1_req_0_uop_uses_stq; // @[dcache.scala:600:32, :639:{44,67}] wire _s2_valid_T_12 = ~_s2_valid_T_11; // @[dcache.scala:639:{26,67}] wire _s2_valid_T_13 = _s2_valid_T_8 & _s2_valid_T_12; // @[dcache.scala:637:76, :638:72, :639:26] reg s2_valid_REG; // @[dcache.scala:635:26] wire s2_valid_0 = s2_valid_REG; // @[dcache.scala:427:49, :635:26] wire [7:0] _s2_req_0_uop_br_mask_T = ~io_lsu_brupdate_b1_resolve_mask_0; // @[util.scala:85:27] wire [7:0] _s2_req_0_uop_br_mask_T_1 = s1_req_0_uop_br_mask & _s2_req_0_uop_br_mask_T; // @[util.scala:85:{25,27}] reg [3:0] s2_tag_match_way_0; // @[dcache.scala:643:33] wire s2_tag_match_0 = |s2_tag_match_way_0; // @[dcache.scala:643:33, :644:49] reg [1:0] s2_hit_state_REG_state; // @[dcache.scala:645:93] wire [1:0] _s2_hit_state_WIRE_0_state = s2_hit_state_REG_state; // @[dcache.scala:622:47, :645:93] reg [1:0] s2_hit_state_REG_1_state; // @[dcache.scala:645:93] wire [1:0] _s2_hit_state_WIRE_1_state_0 = s2_hit_state_REG_1_state; // @[dcache.scala:622:47, :645:93] reg [1:0] s2_hit_state_REG_2_state; // @[dcache.scala:645:93] wire [1:0] _s2_hit_state_WIRE_2_state = s2_hit_state_REG_2_state; // @[dcache.scala:622:47, :645:93] reg [1:0] s2_hit_state_REG_3_state; // @[dcache.scala:645:93] wire [1:0] _s2_hit_state_WIRE_3_state = s2_hit_state_REG_3_state; // @[dcache.scala:622:47, :645:93] wire _s2_hit_state_T = s2_tag_match_way_0[0]; // @[Mux.scala:32:36] wire _s2_data_muxed_T = s2_tag_match_way_0[0]; // @[Mux.scala:32:36] wire _mshrs_io_meta_resp_bits_T = s2_tag_match_way_0[0]; // @[Mux.scala:32:36] wire _s2_hit_state_T_1 = s2_tag_match_way_0[1]; // @[Mux.scala:32:36] wire _s2_data_muxed_T_1 = s2_tag_match_way_0[1]; // @[Mux.scala:32:36] wire _mshrs_io_meta_resp_bits_T_1 = s2_tag_match_way_0[1]; // @[Mux.scala:32:36] wire _s2_hit_state_T_2 = s2_tag_match_way_0[2]; // @[Mux.scala:32:36] wire _s2_data_muxed_T_2 = s2_tag_match_way_0[2]; // @[Mux.scala:32:36] wire _mshrs_io_meta_resp_bits_T_2 = s2_tag_match_way_0[2]; // @[Mux.scala:32:36] wire _s2_hit_state_T_3 = 
s2_tag_match_way_0[3]; // @[Mux.scala:32:36] wire _s2_data_muxed_T_3 = s2_tag_match_way_0[3]; // @[Mux.scala:32:36] wire _mshrs_io_meta_resp_bits_T_3 = s2_tag_match_way_0[3]; // @[Mux.scala:32:36] wire [1:0] _s2_hit_state_WIRE_2; // @[Mux.scala:30:73] wire [1:0] s2_hit_state_0_state = _s2_hit_state_WIRE_1_state; // @[Mux.scala:30:73] wire [1:0] _s2_hit_state_T_4 = _s2_hit_state_T ? _s2_hit_state_WIRE_0_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _s2_hit_state_T_5 = _s2_hit_state_T_1 ? _s2_hit_state_WIRE_1_state_0 : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _s2_hit_state_T_6 = _s2_hit_state_T_2 ? _s2_hit_state_WIRE_2_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _s2_hit_state_T_7 = _s2_hit_state_T_3 ? _s2_hit_state_WIRE_3_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _s2_hit_state_T_8 = _s2_hit_state_T_4 | _s2_hit_state_T_5; // @[Mux.scala:30:73] wire [1:0] _s2_hit_state_T_9 = _s2_hit_state_T_8 | _s2_hit_state_T_6; // @[Mux.scala:30:73] wire [1:0] _s2_hit_state_T_10 = _s2_hit_state_T_9 | _s2_hit_state_T_7; // @[Mux.scala:30:73] assign _s2_hit_state_WIRE_2 = _s2_hit_state_T_10; // @[Mux.scala:30:73] assign _s2_hit_state_WIRE_1_state = _s2_hit_state_WIRE_2; // @[Mux.scala:30:73] wire [1:0] mshrs_io_req_0_bits_old_meta_meta_coh_state = s2_hit_state_0_state; // @[HellaCache.scala:305:20] wire _GEN_2 = s2_req_0_uop_mem_cmd == 5'h1; // @[Consts.scala:90:32] wire _s2_has_permission_r_c_cat_T; // @[Consts.scala:90:32] assign _s2_has_permission_r_c_cat_T = _GEN_2; // @[Consts.scala:90:32] wire _s2_has_permission_r_c_cat_T_23; // @[Consts.scala:90:32] assign _s2_has_permission_r_c_cat_T_23 = _GEN_2; // @[Consts.scala:90:32] wire _s2_new_hit_state_r_c_cat_T; // @[Consts.scala:90:32] assign _s2_new_hit_state_r_c_cat_T = _GEN_2; // @[Consts.scala:90:32] wire _s2_new_hit_state_r_c_cat_T_23; // @[Consts.scala:90:32] assign _s2_new_hit_state_r_c_cat_T_23 = _GEN_2; // @[Consts.scala:90:32] wire _s2_send_resp_T_3; // @[Consts.scala:90:32] assign _s2_send_resp_T_3 = _GEN_2; // @[Consts.scala:90:32] wire _mshrs_io_req_0_valid_T_50; // @[Consts.scala:90:32] assign _mshrs_io_req_0_valid_T_50 = _GEN_2; // @[Consts.scala:90:32] wire _s3_valid_T_1; // @[Consts.scala:90:32] assign _s3_valid_T_1 = _GEN_2; // @[Consts.scala:90:32] wire _GEN_3 = s2_req_0_uop_mem_cmd == 5'h11; // @[Consts.scala:90:49] wire _s2_has_permission_r_c_cat_T_1; // @[Consts.scala:90:49] assign _s2_has_permission_r_c_cat_T_1 = _GEN_3; // @[Consts.scala:90:49] wire _s2_has_permission_r_c_cat_T_24; // @[Consts.scala:90:49] assign _s2_has_permission_r_c_cat_T_24 = _GEN_3; // @[Consts.scala:90:49] wire _s2_new_hit_state_r_c_cat_T_1; // @[Consts.scala:90:49] assign _s2_new_hit_state_r_c_cat_T_1 = _GEN_3; // @[Consts.scala:90:49] wire _s2_new_hit_state_r_c_cat_T_24; // @[Consts.scala:90:49] assign _s2_new_hit_state_r_c_cat_T_24 = _GEN_3; // @[Consts.scala:90:49] wire _s2_send_resp_T_4; // @[Consts.scala:90:49] assign _s2_send_resp_T_4 = _GEN_3; // @[Consts.scala:90:49] wire _mshrs_io_req_0_valid_T_51; // @[Consts.scala:90:49] assign _mshrs_io_req_0_valid_T_51 = _GEN_3; // @[Consts.scala:90:49] wire _s3_valid_T_2; // @[Consts.scala:90:49] assign _s3_valid_T_2 = _GEN_3; // @[Consts.scala:90:49] wire _s2_has_permission_r_c_cat_T_2 = _s2_has_permission_r_c_cat_T | _s2_has_permission_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _GEN_4 = s2_req_0_uop_mem_cmd == 5'h7; // @[Consts.scala:90:66] wire _s2_has_permission_r_c_cat_T_3; // @[Consts.scala:90:66] assign _s2_has_permission_r_c_cat_T_3 = _GEN_4; // 
@[Consts.scala:90:66] wire _s2_has_permission_r_c_cat_T_26; // @[Consts.scala:90:66] assign _s2_has_permission_r_c_cat_T_26 = _GEN_4; // @[Consts.scala:90:66] wire _s2_new_hit_state_r_c_cat_T_3; // @[Consts.scala:90:66] assign _s2_new_hit_state_r_c_cat_T_3 = _GEN_4; // @[Consts.scala:90:66] wire _s2_new_hit_state_r_c_cat_T_26; // @[Consts.scala:90:66] assign _s2_new_hit_state_r_c_cat_T_26 = _GEN_4; // @[Consts.scala:90:66] wire _s2_sc_T; // @[dcache.scala:664:37] assign _s2_sc_T = _GEN_4; // @[Consts.scala:90:66] wire _s2_send_resp_T_6; // @[Consts.scala:90:66] assign _s2_send_resp_T_6 = _GEN_4; // @[Consts.scala:90:66] wire _s2_send_resp_T_30; // @[package.scala:16:47] assign _s2_send_resp_T_30 = _GEN_4; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_27; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_27 = _GEN_4; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_53; // @[Consts.scala:90:66] assign _mshrs_io_req_0_valid_T_53 = _GEN_4; // @[Consts.scala:90:66] wire _s3_valid_T_4; // @[Consts.scala:90:66] assign _s3_valid_T_4 = _GEN_4; // @[Consts.scala:90:66] wire _s2_has_permission_r_c_cat_T_4 = _s2_has_permission_r_c_cat_T_2 | _s2_has_permission_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _GEN_5 = s2_req_0_uop_mem_cmd == 5'h4; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_5; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_5 = _GEN_5; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_28; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_28 = _GEN_5; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_5; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_5 = _GEN_5; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_28; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_28 = _GEN_5; // @[package.scala:16:47] wire _s2_send_resp_T_8; // @[package.scala:16:47] assign _s2_send_resp_T_8 = _GEN_5; // @[package.scala:16:47] wire _s2_send_resp_T_34; // @[package.scala:16:47] assign _s2_send_resp_T_34 = _GEN_5; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_31; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_31 = _GEN_5; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_55; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_55 = _GEN_5; // @[package.scala:16:47] wire _s3_valid_T_6; // @[package.scala:16:47] assign _s3_valid_T_6 = _GEN_5; // @[package.scala:16:47] wire _GEN_6 = s2_req_0_uop_mem_cmd == 5'h9; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_6; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_6 = _GEN_6; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_29; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_29 = _GEN_6; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_6; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_6 = _GEN_6; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_29; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_29 = _GEN_6; // @[package.scala:16:47] wire _s2_send_resp_T_9; // @[package.scala:16:47] assign _s2_send_resp_T_9 = _GEN_6; // @[package.scala:16:47] wire _s2_send_resp_T_35; // @[package.scala:16:47] assign _s2_send_resp_T_35 = _GEN_6; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_32; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_32 = _GEN_6; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_56; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_56 = _GEN_6; // @[package.scala:16:47] wire 
_s3_valid_T_7; // @[package.scala:16:47] assign _s3_valid_T_7 = _GEN_6; // @[package.scala:16:47] wire _GEN_7 = s2_req_0_uop_mem_cmd == 5'hA; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_7; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_7 = _GEN_7; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_30; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_30 = _GEN_7; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_7; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_7 = _GEN_7; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_30; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_30 = _GEN_7; // @[package.scala:16:47] wire _s2_send_resp_T_10; // @[package.scala:16:47] assign _s2_send_resp_T_10 = _GEN_7; // @[package.scala:16:47] wire _s2_send_resp_T_36; // @[package.scala:16:47] assign _s2_send_resp_T_36 = _GEN_7; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_33; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_33 = _GEN_7; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_57; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_57 = _GEN_7; // @[package.scala:16:47] wire _s3_valid_T_8; // @[package.scala:16:47] assign _s3_valid_T_8 = _GEN_7; // @[package.scala:16:47] wire _GEN_8 = s2_req_0_uop_mem_cmd == 5'hB; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_8; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_8 = _GEN_8; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_31; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_31 = _GEN_8; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_8; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_8 = _GEN_8; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_31; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_31 = _GEN_8; // @[package.scala:16:47] wire _s2_send_resp_T_11; // @[package.scala:16:47] assign _s2_send_resp_T_11 = _GEN_8; // @[package.scala:16:47] wire _s2_send_resp_T_37; // @[package.scala:16:47] assign _s2_send_resp_T_37 = _GEN_8; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_34; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_34 = _GEN_8; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_58; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_58 = _GEN_8; // @[package.scala:16:47] wire _s3_valid_T_9; // @[package.scala:16:47] assign _s3_valid_T_9 = _GEN_8; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_9 = _s2_has_permission_r_c_cat_T_5 | _s2_has_permission_r_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_10 = _s2_has_permission_r_c_cat_T_9 | _s2_has_permission_r_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_11 = _s2_has_permission_r_c_cat_T_10 | _s2_has_permission_r_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _GEN_9 = s2_req_0_uop_mem_cmd == 5'h8; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_12; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_12 = _GEN_9; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_35; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_35 = _GEN_9; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_12; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_12 = _GEN_9; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_35; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_35 = _GEN_9; // @[package.scala:16:47] wire 
_s2_send_resp_T_15; // @[package.scala:16:47] assign _s2_send_resp_T_15 = _GEN_9; // @[package.scala:16:47] wire _s2_send_resp_T_41; // @[package.scala:16:47] assign _s2_send_resp_T_41 = _GEN_9; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_38; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_38 = _GEN_9; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_62; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_62 = _GEN_9; // @[package.scala:16:47] wire _s3_valid_T_13; // @[package.scala:16:47] assign _s3_valid_T_13 = _GEN_9; // @[package.scala:16:47] wire _GEN_10 = s2_req_0_uop_mem_cmd == 5'hC; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_13; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_13 = _GEN_10; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_36; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_36 = _GEN_10; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_13; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_13 = _GEN_10; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_36; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_36 = _GEN_10; // @[package.scala:16:47] wire _s2_send_resp_T_16; // @[package.scala:16:47] assign _s2_send_resp_T_16 = _GEN_10; // @[package.scala:16:47] wire _s2_send_resp_T_42; // @[package.scala:16:47] assign _s2_send_resp_T_42 = _GEN_10; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_39; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_39 = _GEN_10; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_63; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_63 = _GEN_10; // @[package.scala:16:47] wire _s3_valid_T_14; // @[package.scala:16:47] assign _s3_valid_T_14 = _GEN_10; // @[package.scala:16:47] wire _GEN_11 = s2_req_0_uop_mem_cmd == 5'hD; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_14; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_14 = _GEN_11; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_37; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_37 = _GEN_11; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_14; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_14 = _GEN_11; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_37; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_37 = _GEN_11; // @[package.scala:16:47] wire _s2_send_resp_T_17; // @[package.scala:16:47] assign _s2_send_resp_T_17 = _GEN_11; // @[package.scala:16:47] wire _s2_send_resp_T_43; // @[package.scala:16:47] assign _s2_send_resp_T_43 = _GEN_11; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_40; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_40 = _GEN_11; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_64; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_64 = _GEN_11; // @[package.scala:16:47] wire _s3_valid_T_15; // @[package.scala:16:47] assign _s3_valid_T_15 = _GEN_11; // @[package.scala:16:47] wire _GEN_12 = s2_req_0_uop_mem_cmd == 5'hE; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_15; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_15 = _GEN_12; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_38; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_38 = _GEN_12; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_15; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_15 = _GEN_12; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_38; // 
@[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_38 = _GEN_12; // @[package.scala:16:47] wire _s2_send_resp_T_18; // @[package.scala:16:47] assign _s2_send_resp_T_18 = _GEN_12; // @[package.scala:16:47] wire _s2_send_resp_T_44; // @[package.scala:16:47] assign _s2_send_resp_T_44 = _GEN_12; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_41; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_41 = _GEN_12; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_65; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_65 = _GEN_12; // @[package.scala:16:47] wire _s3_valid_T_16; // @[package.scala:16:47] assign _s3_valid_T_16 = _GEN_12; // @[package.scala:16:47] wire _GEN_13 = s2_req_0_uop_mem_cmd == 5'hF; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_16; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_16 = _GEN_13; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_39; // @[package.scala:16:47] assign _s2_has_permission_r_c_cat_T_39 = _GEN_13; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_16; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_16 = _GEN_13; // @[package.scala:16:47] wire _s2_new_hit_state_r_c_cat_T_39; // @[package.scala:16:47] assign _s2_new_hit_state_r_c_cat_T_39 = _GEN_13; // @[package.scala:16:47] wire _s2_send_resp_T_19; // @[package.scala:16:47] assign _s2_send_resp_T_19 = _GEN_13; // @[package.scala:16:47] wire _s2_send_resp_T_45; // @[package.scala:16:47] assign _s2_send_resp_T_45 = _GEN_13; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_42; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_42 = _GEN_13; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_66; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_66 = _GEN_13; // @[package.scala:16:47] wire _s3_valid_T_17; // @[package.scala:16:47] assign _s3_valid_T_17 = _GEN_13; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_17 = _s2_has_permission_r_c_cat_T_12 | _s2_has_permission_r_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_18 = _s2_has_permission_r_c_cat_T_17 | _s2_has_permission_r_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_19 = _s2_has_permission_r_c_cat_T_18 | _s2_has_permission_r_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_20 = _s2_has_permission_r_c_cat_T_19 | _s2_has_permission_r_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_21 = _s2_has_permission_r_c_cat_T_11 | _s2_has_permission_r_c_cat_T_20; // @[package.scala:81:59] wire _s2_has_permission_r_c_cat_T_22 = _s2_has_permission_r_c_cat_T_4 | _s2_has_permission_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _s2_has_permission_r_c_cat_T_25 = _s2_has_permission_r_c_cat_T_23 | _s2_has_permission_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _s2_has_permission_r_c_cat_T_27 = _s2_has_permission_r_c_cat_T_25 | _s2_has_permission_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _s2_has_permission_r_c_cat_T_32 = _s2_has_permission_r_c_cat_T_28 | _s2_has_permission_r_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_33 = _s2_has_permission_r_c_cat_T_32 | _s2_has_permission_r_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_34 = _s2_has_permission_r_c_cat_T_33 | _s2_has_permission_r_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_40 = _s2_has_permission_r_c_cat_T_35 | _s2_has_permission_r_c_cat_T_36; // 
@[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_41 = _s2_has_permission_r_c_cat_T_40 | _s2_has_permission_r_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_42 = _s2_has_permission_r_c_cat_T_41 | _s2_has_permission_r_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_43 = _s2_has_permission_r_c_cat_T_42 | _s2_has_permission_r_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _s2_has_permission_r_c_cat_T_44 = _s2_has_permission_r_c_cat_T_34 | _s2_has_permission_r_c_cat_T_43; // @[package.scala:81:59] wire _s2_has_permission_r_c_cat_T_45 = _s2_has_permission_r_c_cat_T_27 | _s2_has_permission_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _GEN_14 = s2_req_0_uop_mem_cmd == 5'h3; // @[Consts.scala:91:54] wire _s2_has_permission_r_c_cat_T_46; // @[Consts.scala:91:54] assign _s2_has_permission_r_c_cat_T_46 = _GEN_14; // @[Consts.scala:91:54] wire _s2_new_hit_state_r_c_cat_T_46; // @[Consts.scala:91:54] assign _s2_new_hit_state_r_c_cat_T_46 = _GEN_14; // @[Consts.scala:91:54] wire _mshrs_io_req_0_valid_T_22; // @[Consts.scala:88:52] assign _mshrs_io_req_0_valid_T_22 = _GEN_14; // @[Consts.scala:88:52, :91:54] wire _s2_has_permission_r_c_cat_T_47 = _s2_has_permission_r_c_cat_T_45 | _s2_has_permission_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _GEN_15 = s2_req_0_uop_mem_cmd == 5'h6; // @[Consts.scala:91:71] wire _s2_has_permission_r_c_cat_T_48; // @[Consts.scala:91:71] assign _s2_has_permission_r_c_cat_T_48 = _GEN_15; // @[Consts.scala:91:71] wire _s2_new_hit_state_r_c_cat_T_48; // @[Consts.scala:91:71] assign _s2_new_hit_state_r_c_cat_T_48 = _GEN_15; // @[Consts.scala:91:71] wire _s2_lr_T; // @[dcache.scala:663:37] assign _s2_lr_T = _GEN_15; // @[Consts.scala:91:71] wire _s2_send_resp_T_29; // @[package.scala:16:47] assign _s2_send_resp_T_29 = _GEN_15; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_26; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_26 = _GEN_15; // @[package.scala:16:47] wire _s2_has_permission_r_c_cat_T_49 = _s2_has_permission_r_c_cat_T_47 | _s2_has_permission_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] s2_has_permission_r_c = {_s2_has_permission_r_c_cat_T_22, _s2_has_permission_r_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _s2_has_permission_r_T = {s2_has_permission_r_c, s2_hit_state_0_state}; // @[Metadata.scala:29:18, :58:19] wire _s2_has_permission_r_T_25 = _s2_has_permission_r_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _s2_has_permission_r_T_27 = {1'h0, _s2_has_permission_r_T_25}; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_28 = _s2_has_permission_r_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _s2_has_permission_r_T_30 = _s2_has_permission_r_T_28 ? 2'h2 : _s2_has_permission_r_T_27; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_31 = _s2_has_permission_r_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _s2_has_permission_r_T_33 = _s2_has_permission_r_T_31 ? 2'h1 : _s2_has_permission_r_T_30; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_34 = _s2_has_permission_r_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _s2_has_permission_r_T_36 = _s2_has_permission_r_T_34 ? 2'h2 : _s2_has_permission_r_T_33; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_37 = _s2_has_permission_r_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _s2_has_permission_r_T_39 = _s2_has_permission_r_T_37 ? 
2'h0 : _s2_has_permission_r_T_36; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_40 = _s2_has_permission_r_T == 4'hE; // @[Misc.scala:49:20] wire _s2_has_permission_r_T_41 = _s2_has_permission_r_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_has_permission_r_T_42 = _s2_has_permission_r_T_40 ? 2'h3 : _s2_has_permission_r_T_39; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_43 = &_s2_has_permission_r_T; // @[Misc.scala:49:20] wire _s2_has_permission_r_T_44 = _s2_has_permission_r_T_43 | _s2_has_permission_r_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_has_permission_r_T_45 = _s2_has_permission_r_T_43 ? 2'h3 : _s2_has_permission_r_T_42; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_46 = _s2_has_permission_r_T == 4'h6; // @[Misc.scala:49:20] wire _s2_has_permission_r_T_47 = _s2_has_permission_r_T_46 | _s2_has_permission_r_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_has_permission_r_T_48 = _s2_has_permission_r_T_46 ? 2'h2 : _s2_has_permission_r_T_45; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_49 = _s2_has_permission_r_T == 4'h7; // @[Misc.scala:49:20] wire _s2_has_permission_r_T_50 = _s2_has_permission_r_T_49 | _s2_has_permission_r_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_has_permission_r_T_51 = _s2_has_permission_r_T_49 ? 2'h3 : _s2_has_permission_r_T_48; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_52 = _s2_has_permission_r_T == 4'h1; // @[Misc.scala:49:20] wire _s2_has_permission_r_T_53 = _s2_has_permission_r_T_52 | _s2_has_permission_r_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_has_permission_r_T_54 = _s2_has_permission_r_T_52 ? 2'h1 : _s2_has_permission_r_T_51; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_55 = _s2_has_permission_r_T == 4'h2; // @[Misc.scala:49:20] wire _s2_has_permission_r_T_56 = _s2_has_permission_r_T_55 | _s2_has_permission_r_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_has_permission_r_T_57 = _s2_has_permission_r_T_55 ? 2'h2 : _s2_has_permission_r_T_54; // @[Misc.scala:35:36, :49:20] wire _s2_has_permission_r_T_58 = _s2_has_permission_r_T == 4'h3; // @[Misc.scala:49:20] wire s2_has_permission_r_1 = _s2_has_permission_r_T_58 | _s2_has_permission_r_T_56; // @[Misc.scala:35:9, :49:20] wire s2_has_permission_0 = s2_has_permission_r_1; // @[Misc.scala:35:9] wire [1:0] s2_has_permission_r_2 = _s2_has_permission_r_T_58 ? 
2'h3 : _s2_has_permission_r_T_57; // @[Misc.scala:35:36, :49:20] wire [1:0] s2_has_permission_meta_state = s2_has_permission_r_2; // @[Misc.scala:35:36] wire _s2_new_hit_state_r_c_cat_T_2 = _s2_new_hit_state_r_c_cat_T | _s2_new_hit_state_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _s2_new_hit_state_r_c_cat_T_4 = _s2_new_hit_state_r_c_cat_T_2 | _s2_new_hit_state_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _s2_new_hit_state_r_c_cat_T_9 = _s2_new_hit_state_r_c_cat_T_5 | _s2_new_hit_state_r_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_10 = _s2_new_hit_state_r_c_cat_T_9 | _s2_new_hit_state_r_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_11 = _s2_new_hit_state_r_c_cat_T_10 | _s2_new_hit_state_r_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_17 = _s2_new_hit_state_r_c_cat_T_12 | _s2_new_hit_state_r_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_18 = _s2_new_hit_state_r_c_cat_T_17 | _s2_new_hit_state_r_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_19 = _s2_new_hit_state_r_c_cat_T_18 | _s2_new_hit_state_r_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_20 = _s2_new_hit_state_r_c_cat_T_19 | _s2_new_hit_state_r_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_21 = _s2_new_hit_state_r_c_cat_T_11 | _s2_new_hit_state_r_c_cat_T_20; // @[package.scala:81:59] wire _s2_new_hit_state_r_c_cat_T_22 = _s2_new_hit_state_r_c_cat_T_4 | _s2_new_hit_state_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _s2_new_hit_state_r_c_cat_T_25 = _s2_new_hit_state_r_c_cat_T_23 | _s2_new_hit_state_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _s2_new_hit_state_r_c_cat_T_27 = _s2_new_hit_state_r_c_cat_T_25 | _s2_new_hit_state_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _s2_new_hit_state_r_c_cat_T_32 = _s2_new_hit_state_r_c_cat_T_28 | _s2_new_hit_state_r_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_33 = _s2_new_hit_state_r_c_cat_T_32 | _s2_new_hit_state_r_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_34 = _s2_new_hit_state_r_c_cat_T_33 | _s2_new_hit_state_r_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_40 = _s2_new_hit_state_r_c_cat_T_35 | _s2_new_hit_state_r_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_41 = _s2_new_hit_state_r_c_cat_T_40 | _s2_new_hit_state_r_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_42 = _s2_new_hit_state_r_c_cat_T_41 | _s2_new_hit_state_r_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_43 = _s2_new_hit_state_r_c_cat_T_42 | _s2_new_hit_state_r_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _s2_new_hit_state_r_c_cat_T_44 = _s2_new_hit_state_r_c_cat_T_34 | _s2_new_hit_state_r_c_cat_T_43; // @[package.scala:81:59] wire _s2_new_hit_state_r_c_cat_T_45 = _s2_new_hit_state_r_c_cat_T_27 | _s2_new_hit_state_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _s2_new_hit_state_r_c_cat_T_47 = _s2_new_hit_state_r_c_cat_T_45 | _s2_new_hit_state_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _s2_new_hit_state_r_c_cat_T_49 = _s2_new_hit_state_r_c_cat_T_47 | _s2_new_hit_state_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] s2_new_hit_state_r_c = {_s2_new_hit_state_r_c_cat_T_22, _s2_new_hit_state_r_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] 
_s2_new_hit_state_r_T = {s2_new_hit_state_r_c, s2_hit_state_0_state}; // @[Metadata.scala:29:18, :58:19] wire _s2_new_hit_state_r_T_25 = _s2_new_hit_state_r_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _s2_new_hit_state_r_T_27 = {1'h0, _s2_new_hit_state_r_T_25}; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_28 = _s2_new_hit_state_r_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _s2_new_hit_state_r_T_30 = _s2_new_hit_state_r_T_28 ? 2'h2 : _s2_new_hit_state_r_T_27; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_31 = _s2_new_hit_state_r_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _s2_new_hit_state_r_T_33 = _s2_new_hit_state_r_T_31 ? 2'h1 : _s2_new_hit_state_r_T_30; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_34 = _s2_new_hit_state_r_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _s2_new_hit_state_r_T_36 = _s2_new_hit_state_r_T_34 ? 2'h2 : _s2_new_hit_state_r_T_33; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_37 = _s2_new_hit_state_r_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _s2_new_hit_state_r_T_39 = _s2_new_hit_state_r_T_37 ? 2'h0 : _s2_new_hit_state_r_T_36; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_40 = _s2_new_hit_state_r_T == 4'hE; // @[Misc.scala:49:20] wire _s2_new_hit_state_r_T_41 = _s2_new_hit_state_r_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_new_hit_state_r_T_42 = _s2_new_hit_state_r_T_40 ? 2'h3 : _s2_new_hit_state_r_T_39; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_43 = &_s2_new_hit_state_r_T; // @[Misc.scala:49:20] wire _s2_new_hit_state_r_T_44 = _s2_new_hit_state_r_T_43 | _s2_new_hit_state_r_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_new_hit_state_r_T_45 = _s2_new_hit_state_r_T_43 ? 2'h3 : _s2_new_hit_state_r_T_42; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_46 = _s2_new_hit_state_r_T == 4'h6; // @[Misc.scala:49:20] wire _s2_new_hit_state_r_T_47 = _s2_new_hit_state_r_T_46 | _s2_new_hit_state_r_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_new_hit_state_r_T_48 = _s2_new_hit_state_r_T_46 ? 2'h2 : _s2_new_hit_state_r_T_45; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_49 = _s2_new_hit_state_r_T == 4'h7; // @[Misc.scala:49:20] wire _s2_new_hit_state_r_T_50 = _s2_new_hit_state_r_T_49 | _s2_new_hit_state_r_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_new_hit_state_r_T_51 = _s2_new_hit_state_r_T_49 ? 2'h3 : _s2_new_hit_state_r_T_48; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_52 = _s2_new_hit_state_r_T == 4'h1; // @[Misc.scala:49:20] wire _s2_new_hit_state_r_T_53 = _s2_new_hit_state_r_T_52 | _s2_new_hit_state_r_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_new_hit_state_r_T_54 = _s2_new_hit_state_r_T_52 ? 2'h1 : _s2_new_hit_state_r_T_51; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_55 = _s2_new_hit_state_r_T == 4'h2; // @[Misc.scala:49:20] wire _s2_new_hit_state_r_T_56 = _s2_new_hit_state_r_T_55 | _s2_new_hit_state_r_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _s2_new_hit_state_r_T_57 = _s2_new_hit_state_r_T_55 ? 2'h2 : _s2_new_hit_state_r_T_54; // @[Misc.scala:35:36, :49:20] wire _s2_new_hit_state_r_T_58 = _s2_new_hit_state_r_T == 4'h3; // @[Misc.scala:49:20] wire s2_new_hit_state_r_1 = _s2_new_hit_state_r_T_58 | _s2_new_hit_state_r_T_56; // @[Misc.scala:35:9, :49:20] wire [1:0] s2_new_hit_state_r_2 = _s2_new_hit_state_r_T_58 ? 
2'h3 : _s2_new_hit_state_r_T_57; // @[Misc.scala:35:36, :49:20] wire [1:0] s2_new_hit_state_meta_state = s2_new_hit_state_r_2; // @[Misc.scala:35:36] wire [1:0] s2_new_hit_state_0_state = s2_new_hit_state_meta_state; // @[Metadata.scala:160:20] wire _s2_hit_T = s2_tag_match_0 & s2_has_permission_0; // @[dcache.scala:427:49, :644:49, :649:47] wire _s2_hit_T_1 = s2_hit_state_0_state == s2_new_hit_state_0_state; // @[Metadata.scala:46:46] wire _s2_hit_T_2 = _s2_hit_T & _s2_hit_T_1; // @[Metadata.scala:46:46] wire _s2_hit_T_3 = ~_mshrs_io_block_hit_0; // @[dcache.scala:433:21, :649:117] wire _s2_hit_T_4 = _s2_hit_T_2 & _s2_hit_T_3; // @[dcache.scala:649:{71,114,117}] wire _T_67 = s2_type == 3'h0; // @[package.scala:16:47] wire _s2_hit_T_5; // @[package.scala:16:47] assign _s2_hit_T_5 = _T_67; // @[package.scala:16:47] wire _s2_lr_T_2; // @[dcache.scala:663:83] assign _s2_lr_T_2 = _T_67; // @[package.scala:16:47] wire _s2_sc_T_2; // @[dcache.scala:664:83] assign _s2_sc_T_2 = _T_67; // @[package.scala:16:47] wire _s2_hit_T_6 = s2_type == 3'h2; // @[package.scala:16:47] wire _s2_hit_T_7 = _s2_hit_T_5 | _s2_hit_T_6; // @[package.scala:16:47, :81:59] wire _s2_hit_T_8 = _s2_hit_T_4 | _s2_hit_T_7; // @[package.scala:81:59] wire s2_hit_0 = _s2_hit_T_8; // @[dcache.scala:427:49, :649:141] wire s2_nack_0; // @[dcache.scala:650:21] reg s2_wb_idx_matches_0; // @[dcache.scala:654:34] reg [39:0] debug_sc_fail_addr; // @[dcache.scala:657:35] reg [7:0] debug_sc_fail_cnt; // @[dcache.scala:658:35] reg [6:0] lrsc_count; // @[dcache.scala:660:27] wire lrsc_valid = |(lrsc_count[6:2]); // @[dcache.scala:660:27, :661:31] reg [33:0] lrsc_addr; // @[dcache.scala:662:23] reg s2_lr_REG; // @[dcache.scala:663:59] wire _s2_lr_T_1 = ~s2_lr_REG; // @[dcache.scala:663:{51,59}] wire _s2_lr_T_3 = _s2_lr_T_1 | _s2_lr_T_2; // @[dcache.scala:663:{51,72,83}] wire s2_lr = _s2_lr_T & _s2_lr_T_3; // @[dcache.scala:663:{37,47,72}] reg s2_sc_REG; // @[dcache.scala:664:59] wire _s2_sc_T_1 = ~s2_sc_REG; // @[dcache.scala:664:{51,59}] wire _s2_sc_T_3 = _s2_sc_T_1 | _s2_sc_T_2; // @[dcache.scala:664:{51,72,83}] wire s2_sc = _s2_sc_T & _s2_sc_T_3; // @[dcache.scala:664:{37,47,72}] wire cache_resp_0_bits_data_doZero_2 = s2_sc; // @[AMOALU.scala:43:31] wire [33:0] _s2_lrsc_addr_match_T = s2_req_0_addr[39:6]; // @[dcache.scala:632:25, :665:86] wire [33:0] _lrsc_addr_T = s2_req_0_addr[39:6]; // @[dcache.scala:632:25, :665:86, :672:35] wire _s2_lrsc_addr_match_T_1 = lrsc_addr == _s2_lrsc_addr_match_T; // @[dcache.scala:662:23, :665:{66,86}] wire _s2_lrsc_addr_match_T_2 = lrsc_valid & _s2_lrsc_addr_match_T_1; // @[dcache.scala:661:31, :665:{53,66}] wire s2_lrsc_addr_match_0 = _s2_lrsc_addr_match_T_2; // @[dcache.scala:427:49, :665:53] wire _s2_sc_fail_T = ~s2_lrsc_addr_match_0; // @[dcache.scala:427:49, :666:29] wire s2_sc_fail = s2_sc & _s2_sc_fail_T; // @[dcache.scala:664:47, :666:{26,29}] wire [7:0] _lrsc_count_T = {1'h0, lrsc_count} - 8'h1; // @[dcache.scala:660:27, :667:54] wire [6:0] _lrsc_count_T_1 = _lrsc_count_T[6:0]; // @[dcache.scala:667:54] wire _mshrs_io_req_0_valid_T_10 = s2_type == 3'h4; // @[package.scala:16:47] wire [8:0] _debug_sc_fail_cnt_T = {1'h0, debug_sc_fail_cnt} + 9'h1; // @[dcache.scala:658:35, :692:48] wire [7:0] _debug_sc_fail_cnt_T_1 = _debug_sc_fail_cnt_T[7:0]; // @[dcache.scala:692:48] wire [63:0] s2_data_0_0; // @[dcache.scala:705:21] wire [63:0] s2_data_0_1; // @[dcache.scala:705:21] wire [63:0] s2_data_0_2; // @[dcache.scala:705:21] wire [63:0] s2_data_0_3; // @[dcache.scala:705:21] wire [63:0] 
_s2_data_muxed_T_4 = _s2_data_muxed_T ? s2_data_0_0 : 64'h0; // @[Mux.scala:30:73, :32:36] wire [63:0] _s2_data_muxed_T_5 = _s2_data_muxed_T_1 ? s2_data_0_1 : 64'h0; // @[Mux.scala:30:73, :32:36] wire [63:0] _s2_data_muxed_T_6 = _s2_data_muxed_T_2 ? s2_data_0_2 : 64'h0; // @[Mux.scala:30:73, :32:36] wire [63:0] _s2_data_muxed_T_7 = _s2_data_muxed_T_3 ? s2_data_0_3 : 64'h0; // @[Mux.scala:30:73, :32:36] wire [63:0] _s2_data_muxed_T_8 = _s2_data_muxed_T_4 | _s2_data_muxed_T_5; // @[Mux.scala:30:73] wire [63:0] _s2_data_muxed_T_9 = _s2_data_muxed_T_8 | _s2_data_muxed_T_6; // @[Mux.scala:30:73] wire [63:0] _s2_data_muxed_T_10 = _s2_data_muxed_T_9 | _s2_data_muxed_T_7; // @[Mux.scala:30:73] wire [63:0] _s2_data_muxed_WIRE = _s2_data_muxed_T_10; // @[Mux.scala:30:73] wire [63:0] s2_data_muxed_0 = _s2_data_muxed_WIRE; // @[Mux.scala:30:73] wire [63:0] _s2_data_word_prebypass_T_1 = s2_data_muxed_0; // @[dcache.scala:427:49, :825:63] wire replace; // @[Replacement.scala:37:29] wire [1:0] lfsr_lo_lo_lo = {_lfsr_prng_io_out_1, _lfsr_prng_io_out_0}; // @[PRNG.scala:91:22, :95:17] wire [1:0] lfsr_lo_lo_hi = {_lfsr_prng_io_out_3, _lfsr_prng_io_out_2}; // @[PRNG.scala:91:22, :95:17] wire [3:0] lfsr_lo_lo = {lfsr_lo_lo_hi, lfsr_lo_lo_lo}; // @[PRNG.scala:95:17] wire [1:0] lfsr_lo_hi_lo = {_lfsr_prng_io_out_5, _lfsr_prng_io_out_4}; // @[PRNG.scala:91:22, :95:17] wire [1:0] lfsr_lo_hi_hi = {_lfsr_prng_io_out_7, _lfsr_prng_io_out_6}; // @[PRNG.scala:91:22, :95:17] wire [3:0] lfsr_lo_hi = {lfsr_lo_hi_hi, lfsr_lo_hi_lo}; // @[PRNG.scala:95:17] wire [7:0] lfsr_lo = {lfsr_lo_hi, lfsr_lo_lo}; // @[PRNG.scala:95:17] wire [1:0] lfsr_hi_lo_lo = {_lfsr_prng_io_out_9, _lfsr_prng_io_out_8}; // @[PRNG.scala:91:22, :95:17] wire [1:0] lfsr_hi_lo_hi = {_lfsr_prng_io_out_11, _lfsr_prng_io_out_10}; // @[PRNG.scala:91:22, :95:17] wire [3:0] lfsr_hi_lo = {lfsr_hi_lo_hi, lfsr_hi_lo_lo}; // @[PRNG.scala:95:17] wire [1:0] lfsr_hi_hi_lo = {_lfsr_prng_io_out_13, _lfsr_prng_io_out_12}; // @[PRNG.scala:91:22, :95:17] wire [1:0] lfsr_hi_hi_hi = {_lfsr_prng_io_out_15, _lfsr_prng_io_out_14}; // @[PRNG.scala:91:22, :95:17] wire [3:0] lfsr_hi_hi = {lfsr_hi_hi_hi, lfsr_hi_hi_lo}; // @[PRNG.scala:95:17] wire [7:0] lfsr_hi = {lfsr_hi_hi, lfsr_hi_lo}; // @[PRNG.scala:95:17] wire [15:0] lfsr = {lfsr_hi, lfsr_lo}; // @[PRNG.scala:95:17] wire [1:0] _s1_replaced_way_en_T = lfsr[1:0]; // @[PRNG.scala:95:17] wire [1:0] _s2_replaced_way_en_T = lfsr[1:0]; // @[PRNG.scala:95:17] wire [3:0] s1_replaced_way_en = 4'h1 << _s1_replaced_way_en_T; // @[OneHot.scala:58:35] reg [1:0] s2_replaced_way_en_REG; // @[dcache.scala:718:44] wire [3:0] s2_replaced_way_en = 4'h1 << s2_replaced_way_en_REG; // @[OneHot.scala:58:35] reg [1:0] s2_repl_meta_REG_coh_state; // @[dcache.scala:719:88] wire [1:0] _s2_repl_meta_WIRE_0_coh_state = s2_repl_meta_REG_coh_state; // @[dcache.scala:622:47, :719:88] reg [19:0] s2_repl_meta_REG_tag; // @[dcache.scala:719:88] wire [19:0] _s2_repl_meta_WIRE_0_tag = s2_repl_meta_REG_tag; // @[dcache.scala:622:47, :719:88] reg [1:0] s2_repl_meta_REG_1_coh_state; // @[dcache.scala:719:88] wire [1:0] _s2_repl_meta_WIRE_1_coh_state_0 = s2_repl_meta_REG_1_coh_state; // @[dcache.scala:622:47, :719:88] reg [19:0] s2_repl_meta_REG_1_tag; // @[dcache.scala:719:88] wire [19:0] _s2_repl_meta_WIRE_1_tag_0 = s2_repl_meta_REG_1_tag; // @[dcache.scala:622:47, :719:88] reg [1:0] s2_repl_meta_REG_2_coh_state; // @[dcache.scala:719:88] wire [1:0] _s2_repl_meta_WIRE_2_coh_state = s2_repl_meta_REG_2_coh_state; // @[dcache.scala:622:47, :719:88] reg [19:0] 
s2_repl_meta_REG_2_tag; // @[dcache.scala:719:88] wire [19:0] _s2_repl_meta_WIRE_2_tag = s2_repl_meta_REG_2_tag; // @[dcache.scala:622:47, :719:88] reg [1:0] s2_repl_meta_REG_3_coh_state; // @[dcache.scala:719:88] wire [1:0] _s2_repl_meta_WIRE_3_coh_state = s2_repl_meta_REG_3_coh_state; // @[dcache.scala:622:47, :719:88] reg [19:0] s2_repl_meta_REG_3_tag; // @[dcache.scala:719:88] wire [19:0] _s2_repl_meta_WIRE_3_tag = s2_repl_meta_REG_3_tag; // @[dcache.scala:622:47, :719:88] wire _s2_repl_meta_T = s2_replaced_way_en[0]; // @[OneHot.scala:58:35] wire _s2_repl_meta_T_1 = s2_replaced_way_en[1]; // @[OneHot.scala:58:35] wire _s2_repl_meta_T_2 = s2_replaced_way_en[2]; // @[OneHot.scala:58:35] wire _s2_repl_meta_T_3 = s2_replaced_way_en[3]; // @[OneHot.scala:58:35] wire [1:0] _s2_repl_meta_WIRE_3_state; // @[Mux.scala:30:73] wire [19:0] _s2_repl_meta_WIRE_2; // @[Mux.scala:30:73] wire [1:0] s2_repl_meta_0_coh_state = _s2_repl_meta_WIRE_1_coh_state; // @[Mux.scala:30:73] wire [19:0] s2_repl_meta_0_tag = _s2_repl_meta_WIRE_1_tag; // @[Mux.scala:30:73] wire [19:0] _s2_repl_meta_T_4 = _s2_repl_meta_T ? _s2_repl_meta_WIRE_0_tag : 20'h0; // @[Mux.scala:30:73, :32:36] wire [19:0] _s2_repl_meta_T_5 = _s2_repl_meta_T_1 ? _s2_repl_meta_WIRE_1_tag_0 : 20'h0; // @[Mux.scala:30:73, :32:36] wire [19:0] _s2_repl_meta_T_6 = _s2_repl_meta_T_2 ? _s2_repl_meta_WIRE_2_tag : 20'h0; // @[Mux.scala:30:73, :32:36] wire [19:0] _s2_repl_meta_T_7 = _s2_repl_meta_T_3 ? _s2_repl_meta_WIRE_3_tag : 20'h0; // @[Mux.scala:30:73, :32:36] wire [19:0] _s2_repl_meta_T_8 = _s2_repl_meta_T_4 | _s2_repl_meta_T_5; // @[Mux.scala:30:73] wire [19:0] _s2_repl_meta_T_9 = _s2_repl_meta_T_8 | _s2_repl_meta_T_6; // @[Mux.scala:30:73] wire [19:0] _s2_repl_meta_T_10 = _s2_repl_meta_T_9 | _s2_repl_meta_T_7; // @[Mux.scala:30:73] assign _s2_repl_meta_WIRE_2 = _s2_repl_meta_T_10; // @[Mux.scala:30:73] assign _s2_repl_meta_WIRE_1_tag = _s2_repl_meta_WIRE_2; // @[Mux.scala:30:73] wire [1:0] _s2_repl_meta_WIRE_4; // @[Mux.scala:30:73] assign _s2_repl_meta_WIRE_1_coh_state = _s2_repl_meta_WIRE_3_state; // @[Mux.scala:30:73] wire [1:0] _s2_repl_meta_T_11 = _s2_repl_meta_T ? _s2_repl_meta_WIRE_0_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _s2_repl_meta_T_12 = _s2_repl_meta_T_1 ? _s2_repl_meta_WIRE_1_coh_state_0 : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _s2_repl_meta_T_13 = _s2_repl_meta_T_2 ? _s2_repl_meta_WIRE_2_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _s2_repl_meta_T_14 = _s2_repl_meta_T_3 ? 
_s2_repl_meta_WIRE_3_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _s2_repl_meta_T_15 = _s2_repl_meta_T_11 | _s2_repl_meta_T_12; // @[Mux.scala:30:73] wire [1:0] _s2_repl_meta_T_16 = _s2_repl_meta_T_15 | _s2_repl_meta_T_13; // @[Mux.scala:30:73] wire [1:0] _s2_repl_meta_T_17 = _s2_repl_meta_T_16 | _s2_repl_meta_T_14; // @[Mux.scala:30:73] assign _s2_repl_meta_WIRE_4 = _s2_repl_meta_T_17; // @[Mux.scala:30:73] assign _s2_repl_meta_WIRE_3_state = _s2_repl_meta_WIRE_4; // @[Mux.scala:30:73] wire [19:0] mshrs_io_req_0_bits_old_meta_meta_tag = s2_repl_meta_0_tag; // @[HellaCache.scala:305:20] reg s2_nack_hit_0; // @[dcache.scala:722:31] wire _GEN_16 = s2_valid_0 & s2_hit_0; // @[dcache.scala:427:49, :724:50] wire _s2_nack_victim_T; // @[dcache.scala:724:50] assign _s2_nack_victim_T = _GEN_16; // @[dcache.scala:724:50] wire _s3_valid_T; // @[dcache.scala:871:38] assign _s3_valid_T = _GEN_16; // @[dcache.scala:724:50, :871:38] wire _s2_nack_victim_T_1 = _s2_nack_victim_T & _mshrs_io_secondary_miss_0; // @[dcache.scala:433:21, :724:{50,64}] wire s2_nack_victim_0 = _s2_nack_victim_T_1; // @[dcache.scala:427:49, :724:64] wire _s2_nack_miss_T = ~s2_hit_0; // @[dcache.scala:427:49, :651:36, :726:53] wire _s2_nack_miss_T_1 = s2_valid_0 & _s2_nack_miss_T; // @[dcache.scala:427:49, :726:{50,53}] wire _s2_nack_miss_T_2 = ~_mshrs_io_req_0_ready; // @[dcache.scala:433:21, :726:67] wire _s2_nack_miss_T_3 = _s2_nack_miss_T_1 & _s2_nack_miss_T_2; // @[dcache.scala:726:{50,64,67}] wire s2_nack_miss_0 = _s2_nack_miss_T_3; // @[dcache.scala:427:49, :726:64] wire _s2_nack_wb_T = ~s2_hit_0; // @[dcache.scala:427:49, :651:36, :730:53] wire _s2_nack_wb_T_1 = s2_valid_0 & _s2_nack_wb_T; // @[dcache.scala:427:49, :730:{50,53}] wire _s2_nack_wb_T_2 = _s2_nack_wb_T_1 & s2_wb_idx_matches_0; // @[dcache.scala:654:34, :730:{50,64}] wire s2_nack_wb_0 = _s2_nack_wb_T_2; // @[dcache.scala:427:49, :730:64] assign s2_nack_0 = (s2_nack_miss_0 | s2_nack_hit_0 | s2_nack_victim_0 | s2_nack_wb_0) & (|s2_type); // @[dcache.scala:427:49, :633:25, :650:21, :722:31, :732:{55,73,113,131,142}] reg s2_send_resp_REG; // @[dcache.scala:733:44] wire _s2_send_resp_T = ~s2_nack_0; // @[dcache.scala:650:21, :668:60, :733:73] wire _s2_send_resp_T_1 = s2_send_resp_REG & _s2_send_resp_T; // @[dcache.scala:733:{44,70,73}] wire _mshrs_io_req_0_valid_T_74; // @[dcache.scala:754:77] wire _T_73 = _mshrs_io_req_0_ready & _mshrs_io_req_0_valid_T_74; // @[Decoupled.scala:51:35] assign replace = _T_73; // @[Decoupled.scala:51:35] wire _s2_send_resp_T_2; // @[Decoupled.scala:51:35] assign _s2_send_resp_T_2 = _T_73; // @[Decoupled.scala:51:35] wire _s2_send_resp_T_5 = _s2_send_resp_T_3 | _s2_send_resp_T_4; // @[Consts.scala:90:{32,42,49}] wire _s2_send_resp_T_7 = _s2_send_resp_T_5 | _s2_send_resp_T_6; // @[Consts.scala:90:{42,59,66}] wire _s2_send_resp_T_12 = _s2_send_resp_T_8 | _s2_send_resp_T_9; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_13 = _s2_send_resp_T_12 | _s2_send_resp_T_10; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_14 = _s2_send_resp_T_13 | _s2_send_resp_T_11; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_20 = _s2_send_resp_T_15 | _s2_send_resp_T_16; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_21 = _s2_send_resp_T_20 | _s2_send_resp_T_17; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_22 = _s2_send_resp_T_21 | _s2_send_resp_T_18; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_23 = _s2_send_resp_T_22 | _s2_send_resp_T_19; // @[package.scala:16:47, :81:59] wire 
_s2_send_resp_T_24 = _s2_send_resp_T_14 | _s2_send_resp_T_23; // @[package.scala:81:59] wire _s2_send_resp_T_25 = _s2_send_resp_T_7 | _s2_send_resp_T_24; // @[Consts.scala:87:44, :90:{59,76}] wire _s2_send_resp_T_26 = _s2_send_resp_T_2 & _s2_send_resp_T_25; // @[Decoupled.scala:51:35] wire _GEN_17 = s2_req_0_uop_mem_cmd == 5'h0; // @[package.scala:16:47] wire _s2_send_resp_T_27; // @[package.scala:16:47] assign _s2_send_resp_T_27 = _GEN_17; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_24; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_24 = _GEN_17; // @[package.scala:16:47] wire _GEN_18 = s2_req_0_uop_mem_cmd == 5'h10; // @[package.scala:16:47] wire _s2_send_resp_T_28; // @[package.scala:16:47] assign _s2_send_resp_T_28 = _GEN_18; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_25; // @[package.scala:16:47] assign _mshrs_io_req_0_valid_T_25 = _GEN_18; // @[package.scala:16:47] wire _s2_send_resp_T_31 = _s2_send_resp_T_27 | _s2_send_resp_T_28; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_32 = _s2_send_resp_T_31 | _s2_send_resp_T_29; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_33 = _s2_send_resp_T_32 | _s2_send_resp_T_30; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_38 = _s2_send_resp_T_34 | _s2_send_resp_T_35; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_39 = _s2_send_resp_T_38 | _s2_send_resp_T_36; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_40 = _s2_send_resp_T_39 | _s2_send_resp_T_37; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_46 = _s2_send_resp_T_41 | _s2_send_resp_T_42; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_47 = _s2_send_resp_T_46 | _s2_send_resp_T_43; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_48 = _s2_send_resp_T_47 | _s2_send_resp_T_44; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_49 = _s2_send_resp_T_48 | _s2_send_resp_T_45; // @[package.scala:16:47, :81:59] wire _s2_send_resp_T_50 = _s2_send_resp_T_40 | _s2_send_resp_T_49; // @[package.scala:81:59] wire _s2_send_resp_T_51 = _s2_send_resp_T_33 | _s2_send_resp_T_50; // @[package.scala:81:59] wire _s2_send_resp_T_52 = ~_s2_send_resp_T_51; // @[Consts.scala:89:68] wire _s2_send_resp_T_53 = _s2_send_resp_T_26 & _s2_send_resp_T_52; // @[dcache.scala:734:{59,93,96}] wire _s2_send_resp_T_54 = s2_hit_0 | _s2_send_resp_T_53; // @[dcache.scala:427:49, :734:{34,93}] wire _s2_send_resp_T_55 = _s2_send_resp_T_1 & _s2_send_resp_T_54; // @[dcache.scala:733:{70,85}, :734:34] wire s2_send_resp_0 = _s2_send_resp_T_55; // @[dcache.scala:427:49, :733:85] reg s2_send_nack_REG; // @[dcache.scala:735:44] wire _s2_send_nack_T = s2_send_nack_REG & s2_nack_0; // @[dcache.scala:650:21, :735:{44,70}] wire s2_send_nack_0 = _s2_send_nack_T; // @[dcache.scala:427:49, :735:70] wire _s2_store_failed_T = s2_valid_0 & s2_nack_0; // @[dcache.scala:427:49, :650:21, :742:34] wire _s2_store_failed_T_1 = _s2_store_failed_T & s2_send_nack_0; // @[dcache.scala:427:49, :742:{34,48}] assign _s2_store_failed_T_2 = _s2_store_failed_T_1 & s2_req_0_uop_uses_stq; // @[dcache.scala:632:25, :742:{48,67}] assign s2_store_failed = _s2_store_failed_T_2; // @[dcache.scala:603:29, :742:67] wire _mshrs_io_req_0_valid_T = ~s2_hit_0; // @[dcache.scala:427:49, :651:36, :747:29] wire _mshrs_io_req_0_valid_T_1 = s2_valid_0 & _mshrs_io_req_0_valid_T; // @[dcache.scala:427:49, :746:51, :747:29] wire _mshrs_io_req_0_valid_T_2 = ~s2_nack_hit_0; // @[dcache.scala:722:31, :748:29] wire _mshrs_io_req_0_valid_T_3 = _mshrs_io_req_0_valid_T_1 & _mshrs_io_req_0_valid_T_2; // 
@[dcache.scala:746:51, :747:51, :748:29] wire _mshrs_io_req_0_valid_T_4 = ~s2_nack_victim_0; // @[dcache.scala:427:49, :749:29] wire _mshrs_io_req_0_valid_T_5 = _mshrs_io_req_0_valid_T_3 & _mshrs_io_req_0_valid_T_4; // @[dcache.scala:747:51, :748:51, :749:29] wire _mshrs_io_req_0_valid_T_7 = _mshrs_io_req_0_valid_T_5; // @[dcache.scala:748:51, :749:51] wire _mshrs_io_req_0_valid_T_8 = ~s2_nack_wb_0; // @[dcache.scala:427:49, :751:29] wire _mshrs_io_req_0_valid_T_9 = _mshrs_io_req_0_valid_T_7 & _mshrs_io_req_0_valid_T_8; // @[dcache.scala:749:51, :750:51, :751:29] wire _mshrs_io_req_0_valid_T_11 = s2_type == 3'h5; // @[package.scala:16:47] wire _mshrs_io_req_0_valid_T_12 = _mshrs_io_req_0_valid_T_10 | _mshrs_io_req_0_valid_T_11; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_13 = _mshrs_io_req_0_valid_T_9 & _mshrs_io_req_0_valid_T_12; // @[package.scala:81:59] wire [7:0] _GEN_19 = io_lsu_brupdate_b1_mispredict_mask_0 & s2_req_0_uop_br_mask; // @[util.scala:118:51] wire [7:0] _mshrs_io_req_0_valid_T_14; // @[util.scala:118:51] assign _mshrs_io_req_0_valid_T_14 = _GEN_19; // @[util.scala:118:51] wire [7:0] _io_lsu_nack_0_valid_T_4; // @[util.scala:118:51] assign _io_lsu_nack_0_valid_T_4 = _GEN_19; // @[util.scala:118:51] wire _mshrs_io_req_0_valid_T_15 = |_mshrs_io_req_0_valid_T_14; // @[util.scala:118:{51,59}] wire _mshrs_io_req_0_valid_T_16 = ~_mshrs_io_req_0_valid_T_15; // @[util.scala:118:59] wire _mshrs_io_req_0_valid_T_17 = _mshrs_io_req_0_valid_T_13 & _mshrs_io_req_0_valid_T_16; // @[dcache.scala:751:51, :752:77, :753:29] wire _GEN_20 = io_lsu_exception_0 & s2_req_0_uop_uses_ldq; // @[dcache.scala:413:7, :632:25, :754:48] wire _mshrs_io_req_0_valid_T_18; // @[dcache.scala:754:48] assign _mshrs_io_req_0_valid_T_18 = _GEN_20; // @[dcache.scala:754:48] wire _io_lsu_nack_0_valid_T_1; // @[dcache.scala:863:48] assign _io_lsu_nack_0_valid_T_1 = _GEN_20; // @[dcache.scala:754:48, :863:48] wire _mshrs_io_req_0_valid_T_19 = ~_mshrs_io_req_0_valid_T_18; // @[dcache.scala:754:{29,48}] wire _mshrs_io_req_0_valid_T_20 = _mshrs_io_req_0_valid_T_17 & _mshrs_io_req_0_valid_T_19; // @[dcache.scala:752:77, :753:79, :754:29] wire _mshrs_io_req_0_valid_T_21 = s2_req_0_uop_mem_cmd == 5'h2; // @[Consts.scala:88:35] wire _mshrs_io_req_0_valid_T_23 = _mshrs_io_req_0_valid_T_21 | _mshrs_io_req_0_valid_T_22; // @[Consts.scala:88:{35,45,52}] wire _mshrs_io_req_0_valid_T_28 = _mshrs_io_req_0_valid_T_24 | _mshrs_io_req_0_valid_T_25; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_29 = _mshrs_io_req_0_valid_T_28 | _mshrs_io_req_0_valid_T_26; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_30 = _mshrs_io_req_0_valid_T_29 | _mshrs_io_req_0_valid_T_27; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_35 = _mshrs_io_req_0_valid_T_31 | _mshrs_io_req_0_valid_T_32; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_36 = _mshrs_io_req_0_valid_T_35 | _mshrs_io_req_0_valid_T_33; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_37 = _mshrs_io_req_0_valid_T_36 | _mshrs_io_req_0_valid_T_34; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_43 = _mshrs_io_req_0_valid_T_38 | _mshrs_io_req_0_valid_T_39; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_44 = _mshrs_io_req_0_valid_T_43 | _mshrs_io_req_0_valid_T_40; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_45 = _mshrs_io_req_0_valid_T_44 | _mshrs_io_req_0_valid_T_41; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_46 = 
_mshrs_io_req_0_valid_T_45 | _mshrs_io_req_0_valid_T_42; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_47 = _mshrs_io_req_0_valid_T_37 | _mshrs_io_req_0_valid_T_46; // @[package.scala:81:59] wire _mshrs_io_req_0_valid_T_48 = _mshrs_io_req_0_valid_T_30 | _mshrs_io_req_0_valid_T_47; // @[package.scala:81:59] wire _mshrs_io_req_0_valid_T_49 = _mshrs_io_req_0_valid_T_23 | _mshrs_io_req_0_valid_T_48; // @[Consts.scala:88:45, :89:68] wire _mshrs_io_req_0_valid_T_52 = _mshrs_io_req_0_valid_T_50 | _mshrs_io_req_0_valid_T_51; // @[Consts.scala:90:{32,42,49}] wire _mshrs_io_req_0_valid_T_54 = _mshrs_io_req_0_valid_T_52 | _mshrs_io_req_0_valid_T_53; // @[Consts.scala:90:{42,59,66}] wire _mshrs_io_req_0_valid_T_59 = _mshrs_io_req_0_valid_T_55 | _mshrs_io_req_0_valid_T_56; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_60 = _mshrs_io_req_0_valid_T_59 | _mshrs_io_req_0_valid_T_57; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_61 = _mshrs_io_req_0_valid_T_60 | _mshrs_io_req_0_valid_T_58; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_67 = _mshrs_io_req_0_valid_T_62 | _mshrs_io_req_0_valid_T_63; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_68 = _mshrs_io_req_0_valid_T_67 | _mshrs_io_req_0_valid_T_64; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_69 = _mshrs_io_req_0_valid_T_68 | _mshrs_io_req_0_valid_T_65; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_70 = _mshrs_io_req_0_valid_T_69 | _mshrs_io_req_0_valid_T_66; // @[package.scala:16:47, :81:59] wire _mshrs_io_req_0_valid_T_71 = _mshrs_io_req_0_valid_T_61 | _mshrs_io_req_0_valid_T_70; // @[package.scala:81:59] wire _mshrs_io_req_0_valid_T_72 = _mshrs_io_req_0_valid_T_54 | _mshrs_io_req_0_valid_T_71; // @[Consts.scala:87:44, :90:{59,76}] wire _mshrs_io_req_0_valid_T_73 = _mshrs_io_req_0_valid_T_49 | _mshrs_io_req_0_valid_T_72; // @[Consts.scala:90:76] assign _mshrs_io_req_0_valid_T_74 = _mshrs_io_req_0_valid_T_20 & _mshrs_io_req_0_valid_T_73; // @[dcache.scala:753:79, :754:77, :756:65] wire [7:0] _mshrs_io_req_0_bits_uop_br_mask_T = ~io_lsu_brupdate_b1_resolve_mask_0; // @[util.scala:85:27] wire [7:0] _mshrs_io_req_0_bits_uop_br_mask_T_1 = s2_req_0_uop_br_mask & _mshrs_io_req_0_bits_uop_br_mask_T; // @[util.scala:85:{25,27}] wire [1:0] _mshrs_io_req_0_bits_old_meta_T_coh_state = s2_tag_match_0 ? mshrs_io_req_0_bits_old_meta_meta_coh_state : s2_repl_meta_0_coh_state; // @[HellaCache.scala:305:20] wire [19:0] _mshrs_io_req_0_bits_old_meta_T_tag = s2_tag_match_0 ? mshrs_io_req_0_bits_old_meta_meta_tag : s2_repl_meta_0_tag; // @[HellaCache.scala:305:20] wire [3:0] _mshrs_io_req_0_bits_way_en_T = s2_tag_match_0 ? 
s2_tag_match_way_0 : s2_replaced_way_en; // @[OneHot.scala:58:35] wire _mshrs_io_req_is_probe_0_T = s2_type == 3'h1; // @[dcache.scala:633:25, :769:49] wire _mshrs_io_req_is_probe_0_T_1 = _mshrs_io_req_is_probe_0_T & s2_valid_0; // @[dcache.scala:427:49, :769:{49,61}] wire _mshrs_io_meta_resp_valid_T = ~s2_nack_hit_0; // @[dcache.scala:722:31, :748:29, :772:36] wire _mshrs_io_meta_resp_valid_T_1 = _mshrs_io_meta_resp_valid_T | _prober_io_mshr_wb_rdy; // @[dcache.scala:432:22, :772:{36,52}] reg [1:0] mshrs_io_meta_resp_bits_REG_0_coh_state; // @[dcache.scala:773:70] reg [19:0] mshrs_io_meta_resp_bits_REG_0_tag; // @[dcache.scala:773:70] reg [1:0] mshrs_io_meta_resp_bits_REG_1_coh_state; // @[dcache.scala:773:70] reg [19:0] mshrs_io_meta_resp_bits_REG_1_tag; // @[dcache.scala:773:70] reg [1:0] mshrs_io_meta_resp_bits_REG_2_coh_state; // @[dcache.scala:773:70] reg [19:0] mshrs_io_meta_resp_bits_REG_2_tag; // @[dcache.scala:773:70] reg [1:0] mshrs_io_meta_resp_bits_REG_3_coh_state; // @[dcache.scala:773:70] reg [19:0] mshrs_io_meta_resp_bits_REG_3_tag; // @[dcache.scala:773:70] wire [1:0] _mshrs_io_meta_resp_bits_WIRE_2_state; // @[Mux.scala:30:73] wire [19:0] _mshrs_io_meta_resp_bits_WIRE_1; // @[Mux.scala:30:73] wire [19:0] _mshrs_io_meta_resp_bits_T_4 = _mshrs_io_meta_resp_bits_T ? mshrs_io_meta_resp_bits_REG_0_tag : 20'h0; // @[Mux.scala:30:73, :32:36] wire [19:0] _mshrs_io_meta_resp_bits_T_5 = _mshrs_io_meta_resp_bits_T_1 ? mshrs_io_meta_resp_bits_REG_1_tag : 20'h0; // @[Mux.scala:30:73, :32:36] wire [19:0] _mshrs_io_meta_resp_bits_T_6 = _mshrs_io_meta_resp_bits_T_2 ? mshrs_io_meta_resp_bits_REG_2_tag : 20'h0; // @[Mux.scala:30:73, :32:36] wire [19:0] _mshrs_io_meta_resp_bits_T_7 = _mshrs_io_meta_resp_bits_T_3 ? mshrs_io_meta_resp_bits_REG_3_tag : 20'h0; // @[Mux.scala:30:73, :32:36] wire [19:0] _mshrs_io_meta_resp_bits_T_8 = _mshrs_io_meta_resp_bits_T_4 | _mshrs_io_meta_resp_bits_T_5; // @[Mux.scala:30:73] wire [19:0] _mshrs_io_meta_resp_bits_T_9 = _mshrs_io_meta_resp_bits_T_8 | _mshrs_io_meta_resp_bits_T_6; // @[Mux.scala:30:73] wire [19:0] _mshrs_io_meta_resp_bits_T_10 = _mshrs_io_meta_resp_bits_T_9 | _mshrs_io_meta_resp_bits_T_7; // @[Mux.scala:30:73] assign _mshrs_io_meta_resp_bits_WIRE_1 = _mshrs_io_meta_resp_bits_T_10; // @[Mux.scala:30:73] wire [19:0] _mshrs_io_meta_resp_bits_WIRE_tag = _mshrs_io_meta_resp_bits_WIRE_1; // @[Mux.scala:30:73] wire [1:0] _mshrs_io_meta_resp_bits_WIRE_3; // @[Mux.scala:30:73] wire [1:0] _mshrs_io_meta_resp_bits_WIRE_coh_state = _mshrs_io_meta_resp_bits_WIRE_2_state; // @[Mux.scala:30:73] wire [1:0] _mshrs_io_meta_resp_bits_T_11 = _mshrs_io_meta_resp_bits_T ? mshrs_io_meta_resp_bits_REG_0_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _mshrs_io_meta_resp_bits_T_12 = _mshrs_io_meta_resp_bits_T_1 ? mshrs_io_meta_resp_bits_REG_1_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _mshrs_io_meta_resp_bits_T_13 = _mshrs_io_meta_resp_bits_T_2 ? mshrs_io_meta_resp_bits_REG_2_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _mshrs_io_meta_resp_bits_T_14 = _mshrs_io_meta_resp_bits_T_3 ? 
mshrs_io_meta_resp_bits_REG_3_coh_state : 2'h0; // @[Mux.scala:30:73, :32:36] wire [1:0] _mshrs_io_meta_resp_bits_T_15 = _mshrs_io_meta_resp_bits_T_11 | _mshrs_io_meta_resp_bits_T_12; // @[Mux.scala:30:73] wire [1:0] _mshrs_io_meta_resp_bits_T_16 = _mshrs_io_meta_resp_bits_T_15 | _mshrs_io_meta_resp_bits_T_13; // @[Mux.scala:30:73] wire [1:0] _mshrs_io_meta_resp_bits_T_17 = _mshrs_io_meta_resp_bits_T_16 | _mshrs_io_meta_resp_bits_T_14; // @[Mux.scala:30:73] assign _mshrs_io_meta_resp_bits_WIRE_3 = _mshrs_io_meta_resp_bits_T_17; // @[Mux.scala:30:73] assign _mshrs_io_meta_resp_bits_WIRE_2_state = _mshrs_io_meta_resp_bits_WIRE_3; // @[Mux.scala:30:73] wire _prober_io_req_valid_T = ~lrsc_valid; // @[dcache.scala:661:31, :778:46] wire _prober_io_req_valid_T_1 = nodeOut_b_valid & _prober_io_req_valid_T; // @[MixedNode.scala:542:17] wire _nodeOut_b_ready_T = ~lrsc_valid; // @[dcache.scala:661:31, :778:46, :779:51] assign _nodeOut_b_ready_T_1 = _prober_io_req_ready & _nodeOut_b_ready_T; // @[dcache.scala:432:22, :779:{48,51}] assign nodeOut_b_ready = _nodeOut_b_ready_T_1; // @[MixedNode.scala:542:17] wire _prober_io_wb_rdy_T = _prober_io_meta_write_bits_idx != _wb_io_idx_bits; // @[dcache.scala:431:18, :432:22, :785:59] wire _prober_io_wb_rdy_T_1 = ~_wb_io_idx_valid; // @[dcache.scala:431:18, :785:82] wire _prober_io_wb_rdy_T_2 = _prober_io_wb_rdy_T | _prober_io_wb_rdy_T_1; // @[dcache.scala:785:{59,79,82}] wire _wb_io_mem_grant_T_1 = nodeOut_d_bits_source == 2'h2; // @[MixedNode.scala:542:17] assign nodeOut_d_ready = _wb_io_mem_grant_T_1 | _mshrs_io_mem_grant_ready; // @[MixedNode.scala:542:17] wire _wb_io_mem_grant_T = nodeOut_d_ready & nodeOut_d_valid; // @[Decoupled.scala:51:35] wire _wb_io_mem_grant_T_2 = _wb_io_mem_grant_T & _wb_io_mem_grant_T_1; // @[Decoupled.scala:51:35] wire opdata = _wb_io_release_bits_opcode[0]; // @[Edges.scala:102:36] wire [26:0] _decode_T_3 = 27'hFFF << _prober_io_rep_bits_size; // @[package.scala:243:71] wire [11:0] _decode_T_4 = _decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _decode_T_5 = ~_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] decode_1 = _decode_T_5[11:3]; // @[package.scala:243:46] reg [8:0] beatsLeft; // @[Arbiter.scala:60:30] wire idle = beatsLeft == 9'h0; // @[Arbiter.scala:60:30, :61:28] wire latch = idle & nodeOut_c_ready; // @[Arbiter.scala:61:28, :62:24] wire [1:0] _readys_T = {_prober_io_rep_valid, _wb_io_release_valid}; // @[Arbiter.scala:68:51] wire [2:0] _readys_T_1 = {_readys_T, 1'h0}; // @[package.scala:253:48] wire [1:0] _readys_T_2 = _readys_T_1[1:0]; // @[package.scala:253:{48,53}] wire [1:0] _readys_T_3 = _readys_T | _readys_T_2; // @[package.scala:253:{43,53}] wire [1:0] _readys_T_4 = _readys_T_3; // @[package.scala:253:43, :254:17] wire [2:0] _readys_T_5 = {_readys_T_4, 1'h0}; // @[package.scala:254:17] wire [1:0] _readys_T_6 = _readys_T_5[1:0]; // @[Arbiter.scala:16:{78,83}] wire [1:0] _readys_T_7 = ~_readys_T_6; // @[Arbiter.scala:16:{61,83}] wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:16:61, :68:76] wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}] wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:16:61, :68:76] wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}] wire _winner_T = readys_0 & _wb_io_release_valid; // @[Arbiter.scala:68:27, :71:69] wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}] wire _winner_T_1 = readys_1 & _prober_io_rep_valid; // @[Arbiter.scala:68:27, :71:69] wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}] wire prefixOR_1 = winner_0; // 
@[Arbiter.scala:71:27, :76:48] wire _prefixOR_T = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48] wire _nodeOut_c_valid_T = _wb_io_release_valid | _prober_io_rep_valid; // @[Arbiter.scala:79:31, :96:46] wire [8:0] maskedBeats_0 = winner_0 & opdata ? 9'h7 : 9'h0; // @[Edges.scala:102:36, :221:14] wire [8:0] initBeats = maskedBeats_0; // @[Arbiter.scala:82:69, :84:44] wire _GEN_21 = nodeOut_c_ready & nodeOut_c_valid; // @[Decoupled.scala:51:35] wire _beatsLeft_T; // @[Decoupled.scala:51:35] assign _beatsLeft_T = _GEN_21; // @[Decoupled.scala:51:35] wire _io_lsu_perf_release_T; // @[Decoupled.scala:51:35] assign _io_lsu_perf_release_T = _GEN_21; // @[Decoupled.scala:51:35] wire [9:0] _beatsLeft_T_1 = {1'h0, beatsLeft} - {9'h0, _beatsLeft_T}; // @[Decoupled.scala:51:35] wire [8:0] _beatsLeft_T_2 = _beatsLeft_T_1[8:0]; // @[Arbiter.scala:85:52] wire [8:0] _beatsLeft_T_3 = latch ? initBeats : _beatsLeft_T_2; // @[Arbiter.scala:62:24, :84:44, :85:{23,52}] reg state_0; // @[Arbiter.scala:88:26] reg state_1; // @[Arbiter.scala:88:26] wire muxState_0 = idle ? winner_0 : state_0; // @[Arbiter.scala:61:28, :71:27, :88:26, :89:25] wire muxState_1 = idle ? winner_1 : state_1; // @[Arbiter.scala:61:28, :71:27, :88:26, :89:25] wire allowed_0 = idle ? readys_0 : state_0; // @[Arbiter.scala:61:28, :68:27, :88:26, :92:24] wire allowed_1 = idle ? readys_1 : state_1; // @[Arbiter.scala:61:28, :68:27, :88:26, :92:24] wire _wb_io_release_ready_T = nodeOut_c_ready & allowed_0; // @[Arbiter.scala:92:24, :94:31] wire _prober_io_rep_ready_T = nodeOut_c_ready & allowed_1; // @[Arbiter.scala:92:24, :94:31] wire _nodeOut_c_valid_T_1 = state_0 & _wb_io_release_valid; // @[Mux.scala:30:73] wire _nodeOut_c_valid_T_2 = state_1 & _prober_io_rep_valid; // @[Mux.scala:30:73] wire _nodeOut_c_valid_T_3 = _nodeOut_c_valid_T_1 | _nodeOut_c_valid_T_2; // @[Mux.scala:30:73] wire _nodeOut_c_valid_WIRE = _nodeOut_c_valid_T_3; // @[Mux.scala:30:73] assign _nodeOut_c_valid_T_4 = idle ? _nodeOut_c_valid_T : _nodeOut_c_valid_WIRE; // @[Mux.scala:30:73] assign nodeOut_c_valid = _nodeOut_c_valid_T_4; // @[Arbiter.scala:96:24] wire [2:0] _nodeOut_c_bits_WIRE_9; // @[Mux.scala:30:73] assign nodeOut_c_bits_opcode = _nodeOut_c_bits_WIRE_opcode; // @[Mux.scala:30:73] wire [2:0] _nodeOut_c_bits_WIRE_8; // @[Mux.scala:30:73] assign nodeOut_c_bits_param = _nodeOut_c_bits_WIRE_param; // @[Mux.scala:30:73] wire [3:0] _nodeOut_c_bits_WIRE_7; // @[Mux.scala:30:73] assign nodeOut_c_bits_size = _nodeOut_c_bits_WIRE_size; // @[Mux.scala:30:73] wire [1:0] _nodeOut_c_bits_WIRE_6; // @[Mux.scala:30:73] assign nodeOut_c_bits_source = _nodeOut_c_bits_WIRE_source; // @[Mux.scala:30:73] wire [31:0] _nodeOut_c_bits_WIRE_5; // @[Mux.scala:30:73] assign nodeOut_c_bits_address = _nodeOut_c_bits_WIRE_address; // @[Mux.scala:30:73] wire [63:0] _nodeOut_c_bits_WIRE_2; // @[Mux.scala:30:73] assign nodeOut_c_bits_data = _nodeOut_c_bits_WIRE_data; // @[Mux.scala:30:73] wire [63:0] _nodeOut_c_bits_T_3 = muxState_0 ? _wb_io_release_bits_data : 64'h0; // @[Mux.scala:30:73] wire [63:0] _nodeOut_c_bits_T_5 = _nodeOut_c_bits_T_3; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_2 = _nodeOut_c_bits_T_5; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_data = _nodeOut_c_bits_WIRE_2; // @[Mux.scala:30:73] wire [31:0] _nodeOut_c_bits_T_6 = muxState_0 ? _wb_io_release_bits_address : 32'h0; // @[Mux.scala:30:73] wire [31:0] _nodeOut_c_bits_T_7 = muxState_1 ? 
_prober_io_rep_bits_address : 32'h0; // @[Mux.scala:30:73] wire [31:0] _nodeOut_c_bits_T_8 = _nodeOut_c_bits_T_6 | _nodeOut_c_bits_T_7; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_5 = _nodeOut_c_bits_T_8; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_address = _nodeOut_c_bits_WIRE_5; // @[Mux.scala:30:73] wire [1:0] _nodeOut_c_bits_T_9 = {muxState_0, 1'h0}; // @[Mux.scala:30:73] wire [1:0] _nodeOut_c_bits_T_10 = muxState_1 ? _prober_io_rep_bits_source : 2'h0; // @[Mux.scala:30:73] wire [1:0] _nodeOut_c_bits_T_11 = _nodeOut_c_bits_T_9 | _nodeOut_c_bits_T_10; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_6 = _nodeOut_c_bits_T_11; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_source = _nodeOut_c_bits_WIRE_6; // @[Mux.scala:30:73] wire [3:0] _nodeOut_c_bits_T_12 = muxState_0 ? 4'h6 : 4'h0; // @[Mux.scala:30:73] wire [3:0] _nodeOut_c_bits_T_13 = muxState_1 ? _prober_io_rep_bits_size : 4'h0; // @[Mux.scala:30:73] wire [3:0] _nodeOut_c_bits_T_14 = _nodeOut_c_bits_T_12 | _nodeOut_c_bits_T_13; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_7 = _nodeOut_c_bits_T_14; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_size = _nodeOut_c_bits_WIRE_7; // @[Mux.scala:30:73] wire [2:0] _nodeOut_c_bits_T_15 = muxState_0 ? _wb_io_release_bits_param : 3'h0; // @[Mux.scala:30:73] wire [2:0] _nodeOut_c_bits_T_16 = muxState_1 ? _prober_io_rep_bits_param : 3'h0; // @[Mux.scala:30:73] wire [2:0] _nodeOut_c_bits_T_17 = _nodeOut_c_bits_T_15 | _nodeOut_c_bits_T_16; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_8 = _nodeOut_c_bits_T_17; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_param = _nodeOut_c_bits_WIRE_8; // @[Mux.scala:30:73] wire [2:0] _nodeOut_c_bits_T_18 = muxState_0 ? _wb_io_release_bits_opcode : 3'h0; // @[Mux.scala:30:73] wire [2:0] _nodeOut_c_bits_T_19 = {muxState_1, 2'h0}; // @[Mux.scala:30:73] wire [2:0] _nodeOut_c_bits_T_20 = _nodeOut_c_bits_T_18 | _nodeOut_c_bits_T_19; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_9 = _nodeOut_c_bits_T_20; // @[Mux.scala:30:73] assign _nodeOut_c_bits_WIRE_opcode = _nodeOut_c_bits_WIRE_9; // @[Mux.scala:30:73] wire [26:0] _io_lsu_perf_release_beats1_decode_T = 27'hFFF << nodeOut_c_bits_size; // @[package.scala:243:71] wire [11:0] _io_lsu_perf_release_beats1_decode_T_1 = _io_lsu_perf_release_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _io_lsu_perf_release_beats1_decode_T_2 = ~_io_lsu_perf_release_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] io_lsu_perf_release_beats1_decode = _io_lsu_perf_release_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire io_lsu_perf_release_beats1_opdata = nodeOut_c_bits_opcode[0]; // @[Edges.scala:102:36] wire [8:0] io_lsu_perf_release_beats1 = io_lsu_perf_release_beats1_opdata ? 
io_lsu_perf_release_beats1_decode : 9'h0; // @[Edges.scala:102:36, :220:59, :221:14] reg [8:0] io_lsu_perf_release_counter; // @[Edges.scala:229:27] wire [9:0] _io_lsu_perf_release_counter1_T = {1'h0, io_lsu_perf_release_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] io_lsu_perf_release_counter1 = _io_lsu_perf_release_counter1_T[8:0]; // @[Edges.scala:230:28] wire io_lsu_perf_release_first = io_lsu_perf_release_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _io_lsu_perf_release_last_T = io_lsu_perf_release_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _io_lsu_perf_release_last_T_1 = io_lsu_perf_release_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire io_lsu_perf_release_last = _io_lsu_perf_release_last_T | _io_lsu_perf_release_last_T_1; // @[Edges.scala:232:{25,33,43}] assign io_lsu_perf_release_done = io_lsu_perf_release_last & _io_lsu_perf_release_T; // @[Decoupled.scala:51:35] assign io_lsu_perf_release_0 = io_lsu_perf_release_done; // @[Edges.scala:233:22] wire [8:0] _io_lsu_perf_release_count_T = ~io_lsu_perf_release_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] io_lsu_perf_release_count = io_lsu_perf_release_beats1 & _io_lsu_perf_release_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _io_lsu_perf_release_counter_T = io_lsu_perf_release_first ? io_lsu_perf_release_beats1 : io_lsu_perf_release_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire _io_lsu_perf_acquire_T = nodeOut_a_ready & nodeOut_a_valid; // @[Decoupled.scala:51:35] wire [26:0] _io_lsu_perf_acquire_beats1_decode_T = 27'hFFF << nodeOut_a_bits_size; // @[package.scala:243:71] wire [11:0] _io_lsu_perf_acquire_beats1_decode_T_1 = _io_lsu_perf_acquire_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _io_lsu_perf_acquire_beats1_decode_T_2 = ~_io_lsu_perf_acquire_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] io_lsu_perf_acquire_beats1_decode = _io_lsu_perf_acquire_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire _io_lsu_perf_acquire_beats1_opdata_T = nodeOut_a_bits_opcode[2]; // @[Edges.scala:92:37] wire io_lsu_perf_acquire_beats1_opdata = ~_io_lsu_perf_acquire_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] io_lsu_perf_acquire_beats1 = io_lsu_perf_acquire_beats1_opdata ? 
io_lsu_perf_acquire_beats1_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [8:0] io_lsu_perf_acquire_counter; // @[Edges.scala:229:27] wire [9:0] _io_lsu_perf_acquire_counter1_T = {1'h0, io_lsu_perf_acquire_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] io_lsu_perf_acquire_counter1 = _io_lsu_perf_acquire_counter1_T[8:0]; // @[Edges.scala:230:28] wire io_lsu_perf_acquire_first = io_lsu_perf_acquire_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _io_lsu_perf_acquire_last_T = io_lsu_perf_acquire_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _io_lsu_perf_acquire_last_T_1 = io_lsu_perf_acquire_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire io_lsu_perf_acquire_last = _io_lsu_perf_acquire_last_T | _io_lsu_perf_acquire_last_T_1; // @[Edges.scala:232:{25,33,43}] assign io_lsu_perf_acquire_done = io_lsu_perf_acquire_last & _io_lsu_perf_acquire_T; // @[Decoupled.scala:51:35] assign io_lsu_perf_acquire_0 = io_lsu_perf_acquire_done; // @[Edges.scala:233:22] wire [8:0] _io_lsu_perf_acquire_count_T = ~io_lsu_perf_acquire_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] io_lsu_perf_acquire_count = io_lsu_perf_acquire_beats1 & _io_lsu_perf_acquire_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _io_lsu_perf_acquire_counter_T = io_lsu_perf_acquire_first ? io_lsu_perf_acquire_beats1 : io_lsu_perf_acquire_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [63:0] s2_data_word_prebypass_0 = _s2_data_word_prebypass_T_1; // @[dcache.scala:427:49, :825:63] wire [63:0] _s2_data_word_0_T_2; // @[dcache.scala:891:27] wire [63:0] s2_data_word_0; // @[dcache.scala:826:26] wire [63:0] size_dat_padded = s2_data_word_0; // @[AMOALU.scala:13:27] wire _cache_resp_0_valid_T; // @[dcache.scala:835:48] wire [63:0] _cache_resp_0_bits_data_T_24; // @[dcache.scala:837:52] wire [63:0] cache_resp_0_bits_data; // @[dcache.scala:833:26] wire cache_resp_0_valid; // @[dcache.scala:833:26] assign _cache_resp_0_valid_T = s2_valid_0 & s2_send_resp_0; // @[dcache.scala:427:49, :835:48] assign cache_resp_0_valid = _cache_resp_0_valid_T; // @[dcache.scala:833:26, :835:48] wire _cache_resp_0_bits_data_shifted_T = s2_req_0_addr[2]; // @[AMOALU.scala:42:29] wire _amoalu_io_mask_upper_T_8 = s2_req_0_addr[2]; // @[AMOALU.scala:20:27, :42:29] wire _amoalu_io_mask_lower_T_2 = s2_req_0_addr[2]; // @[AMOALU.scala:21:27, :42:29] wire [31:0] _cache_resp_0_bits_data_shifted_T_1 = s2_data_word_0[63:32]; // @[AMOALU.scala:42:37] wire [31:0] _cache_resp_0_bits_data_T_5 = s2_data_word_0[63:32]; // @[AMOALU.scala:42:37, :45:94] wire [31:0] _cache_resp_0_bits_data_shifted_T_2 = s2_data_word_0[31:0]; // @[AMOALU.scala:42:55] wire [31:0] cache_resp_0_bits_data_shifted = _cache_resp_0_bits_data_shifted_T ? 
_cache_resp_0_bits_data_shifted_T_1 : _cache_resp_0_bits_data_shifted_T_2; // @[AMOALU.scala:42:{24,29,37,55}] wire [31:0] cache_resp_0_bits_data_zeroed = cache_resp_0_bits_data_shifted; // @[AMOALU.scala:42:24, :44:23] wire _cache_resp_0_bits_data_T = size == 2'h2; // @[AMOALU.scala:11:18, :45:26] wire _cache_resp_0_bits_data_T_1 = _cache_resp_0_bits_data_T; // @[AMOALU.scala:45:{26,34}] wire _cache_resp_0_bits_data_T_2 = cache_resp_0_bits_data_zeroed[31]; // @[AMOALU.scala:44:23, :45:81] wire _cache_resp_0_bits_data_T_3 = s2_req_0_uop_mem_signed & _cache_resp_0_bits_data_T_2; // @[AMOALU.scala:45:{72,81}] wire [31:0] _cache_resp_0_bits_data_T_4 = {32{_cache_resp_0_bits_data_T_3}}; // @[AMOALU.scala:45:{49,72}] wire [31:0] _cache_resp_0_bits_data_T_6 = _cache_resp_0_bits_data_T_1 ? _cache_resp_0_bits_data_T_4 : _cache_resp_0_bits_data_T_5; // @[AMOALU.scala:45:{20,34,49,94}] wire [63:0] _cache_resp_0_bits_data_T_7 = {_cache_resp_0_bits_data_T_6, cache_resp_0_bits_data_zeroed}; // @[AMOALU.scala:44:23, :45:{16,20}] wire _cache_resp_0_bits_data_shifted_T_3 = s2_req_0_addr[1]; // @[AMOALU.scala:42:29] wire _amoalu_io_mask_upper_T_4 = s2_req_0_addr[1]; // @[AMOALU.scala:20:27, :42:29] wire _amoalu_io_mask_lower_T_1 = s2_req_0_addr[1]; // @[AMOALU.scala:21:27, :42:29] wire [15:0] _cache_resp_0_bits_data_shifted_T_4 = _cache_resp_0_bits_data_T_7[31:16]; // @[AMOALU.scala:42:37, :45:16] wire [15:0] _cache_resp_0_bits_data_shifted_T_5 = _cache_resp_0_bits_data_T_7[15:0]; // @[AMOALU.scala:42:55, :45:16] wire [15:0] cache_resp_0_bits_data_shifted_1 = _cache_resp_0_bits_data_shifted_T_3 ? _cache_resp_0_bits_data_shifted_T_4 : _cache_resp_0_bits_data_shifted_T_5; // @[AMOALU.scala:42:{24,29,37,55}] wire [15:0] cache_resp_0_bits_data_zeroed_1 = cache_resp_0_bits_data_shifted_1; // @[AMOALU.scala:42:24, :44:23] wire _cache_resp_0_bits_data_T_8 = size == 2'h1; // @[AMOALU.scala:11:18, :45:26] wire _cache_resp_0_bits_data_T_9 = _cache_resp_0_bits_data_T_8; // @[AMOALU.scala:45:{26,34}] wire _cache_resp_0_bits_data_T_10 = cache_resp_0_bits_data_zeroed_1[15]; // @[AMOALU.scala:44:23, :45:81] wire _cache_resp_0_bits_data_T_11 = s2_req_0_uop_mem_signed & _cache_resp_0_bits_data_T_10; // @[AMOALU.scala:45:{72,81}] wire [47:0] _cache_resp_0_bits_data_T_12 = {48{_cache_resp_0_bits_data_T_11}}; // @[AMOALU.scala:45:{49,72}] wire [47:0] _cache_resp_0_bits_data_T_13 = _cache_resp_0_bits_data_T_7[63:16]; // @[AMOALU.scala:45:{16,94}] wire [47:0] _cache_resp_0_bits_data_T_14 = _cache_resp_0_bits_data_T_9 ? _cache_resp_0_bits_data_T_12 : _cache_resp_0_bits_data_T_13; // @[AMOALU.scala:45:{20,34,49,94}] wire [63:0] _cache_resp_0_bits_data_T_15 = {_cache_resp_0_bits_data_T_14, cache_resp_0_bits_data_zeroed_1}; // @[AMOALU.scala:44:23, :45:{16,20}] wire _cache_resp_0_bits_data_shifted_T_6 = s2_req_0_addr[0]; // @[AMOALU.scala:42:29] wire _amoalu_io_mask_upper_T = s2_req_0_addr[0]; // @[AMOALU.scala:20:27, :42:29] wire _amoalu_io_mask_lower_T = s2_req_0_addr[0]; // @[AMOALU.scala:21:27, :42:29] wire [7:0] _cache_resp_0_bits_data_shifted_T_7 = _cache_resp_0_bits_data_T_15[15:8]; // @[AMOALU.scala:42:37, :45:16] wire [7:0] _cache_resp_0_bits_data_shifted_T_8 = _cache_resp_0_bits_data_T_15[7:0]; // @[AMOALU.scala:42:55, :45:16] wire [7:0] cache_resp_0_bits_data_shifted_2 = _cache_resp_0_bits_data_shifted_T_6 ? _cache_resp_0_bits_data_shifted_T_7 : _cache_resp_0_bits_data_shifted_T_8; // @[AMOALU.scala:42:{24,29,37,55}] wire [7:0] cache_resp_0_bits_data_zeroed_2 = cache_resp_0_bits_data_doZero_2 ? 
8'h0 : cache_resp_0_bits_data_shifted_2; // @[AMOALU.scala:42:24, :43:31, :44:23] wire _cache_resp_0_bits_data_T_16 = size == 2'h0; // @[AMOALU.scala:11:18, :45:26] wire _cache_resp_0_bits_data_T_17 = _cache_resp_0_bits_data_T_16 | cache_resp_0_bits_data_doZero_2; // @[AMOALU.scala:43:31, :45:{26,34}] wire _cache_resp_0_bits_data_T_18 = cache_resp_0_bits_data_zeroed_2[7]; // @[AMOALU.scala:44:23, :45:81] wire _cache_resp_0_bits_data_T_19 = s2_req_0_uop_mem_signed & _cache_resp_0_bits_data_T_18; // @[AMOALU.scala:45:{72,81}] wire [55:0] _cache_resp_0_bits_data_T_20 = {56{_cache_resp_0_bits_data_T_19}}; // @[AMOALU.scala:45:{49,72}] wire [55:0] _cache_resp_0_bits_data_T_21 = _cache_resp_0_bits_data_T_15[63:8]; // @[AMOALU.scala:45:{16,94}] wire [55:0] _cache_resp_0_bits_data_T_22 = _cache_resp_0_bits_data_T_17 ? _cache_resp_0_bits_data_T_20 : _cache_resp_0_bits_data_T_21; // @[AMOALU.scala:45:{20,34,49,94}] wire [63:0] _cache_resp_0_bits_data_T_23 = {_cache_resp_0_bits_data_T_22, cache_resp_0_bits_data_zeroed_2}; // @[AMOALU.scala:44:23, :45:{16,20}] assign _cache_resp_0_bits_data_T_24 = {_cache_resp_0_bits_data_T_23[63:1], _cache_resp_0_bits_data_T_23[0] | s2_sc_fail}; // @[AMOALU.scala:45:16] assign cache_resp_0_bits_data = _cache_resp_0_bits_data_T_24; // @[dcache.scala:833:26, :837:52] wire [3:0] uncache_resp_bits_uop_ctrl_br_type; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_ctrl_op1_sel; // @[dcache.scala:841:26] wire [2:0] uncache_resp_bits_uop_ctrl_op2_sel; // @[dcache.scala:841:26] wire [2:0] uncache_resp_bits_uop_ctrl_imm_sel; // @[dcache.scala:841:26] wire [4:0] uncache_resp_bits_uop_ctrl_op_fcn; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_ctrl_fcn_dw; // @[dcache.scala:841:26] wire [2:0] uncache_resp_bits_uop_ctrl_csr_cmd; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_ctrl_is_load; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_ctrl_is_sta; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_ctrl_is_std; // @[dcache.scala:841:26] wire [6:0] uncache_resp_bits_uop_uopc; // @[dcache.scala:841:26] wire [31:0] uncache_resp_bits_uop_inst; // @[dcache.scala:841:26] wire [31:0] uncache_resp_bits_uop_debug_inst; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_rvc; // @[dcache.scala:841:26] wire [39:0] uncache_resp_bits_uop_debug_pc; // @[dcache.scala:841:26] wire [2:0] uncache_resp_bits_uop_iq_type; // @[dcache.scala:841:26] wire [9:0] uncache_resp_bits_uop_fu_code; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_iw_state; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_iw_p1_poisoned; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_iw_p2_poisoned; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_br; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_jalr; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_jal; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_sfb; // @[dcache.scala:841:26] wire [7:0] uncache_resp_bits_uop_br_mask; // @[dcache.scala:841:26] wire [2:0] uncache_resp_bits_uop_br_tag; // @[dcache.scala:841:26] wire [3:0] uncache_resp_bits_uop_ftq_idx; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_edge_inst; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_pc_lob; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_taken; // @[dcache.scala:841:26] wire [19:0] uncache_resp_bits_uop_imm_packed; // @[dcache.scala:841:26] wire [11:0] uncache_resp_bits_uop_csr_addr; // @[dcache.scala:841:26] wire [4:0] uncache_resp_bits_uop_rob_idx; // @[dcache.scala:841:26] wire [2:0] 
uncache_resp_bits_uop_ldq_idx; // @[dcache.scala:841:26] wire [2:0] uncache_resp_bits_uop_stq_idx; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_rxq_idx; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_pdst; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_prs1; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_prs2; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_prs3; // @[dcache.scala:841:26] wire [3:0] uncache_resp_bits_uop_ppred; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_prs1_busy; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_prs2_busy; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_prs3_busy; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_ppred_busy; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_stale_pdst; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_exception; // @[dcache.scala:841:26] wire [63:0] uncache_resp_bits_uop_exc_cause; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_bypassable; // @[dcache.scala:841:26] wire [4:0] uncache_resp_bits_uop_mem_cmd; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_mem_size; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_mem_signed; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_fence; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_fencei; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_amo; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_uses_ldq; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_uses_stq; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_sys_pc2epc; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_is_unique; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_flush_on_commit; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_ldst_is_rs1; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_ldst; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_lrs1; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_lrs2; // @[dcache.scala:841:26] wire [5:0] uncache_resp_bits_uop_lrs3; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_ldst_val; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_dst_rtype; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_lrs1_rtype; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_lrs2_rtype; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_frs3_en; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_fp_val; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_fp_single; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_xcpt_pf_if; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_xcpt_ae_if; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_xcpt_ma_if; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_bp_debug_if; // @[dcache.scala:841:26] wire uncache_resp_bits_uop_bp_xcpt_if; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_debug_fsrc; // @[dcache.scala:841:26] wire [1:0] uncache_resp_bits_uop_debug_tsrc; // @[dcache.scala:841:26] wire [63:0] uncache_resp_bits_data; // @[dcache.scala:841:26] wire uncache_resp_bits_is_hella; // @[dcache.scala:841:26] wire uncache_resp_valid; // @[dcache.scala:841:26] wire _mshrs_io_resp_ready_T = ~cache_resp_0_valid; // @[dcache.scala:833:26, :844:26] assign io_lsu_resp_0_bits_out_uop_uopc = resp_0_bits_uop_uopc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_inst = resp_0_bits_uop_inst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_debug_inst = resp_0_bits_uop_debug_inst; // @[util.scala:101:23] 
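  // Editor note (descriptive comment, not generated output): the io_lsu_resp_0_bits_out_* assignments
  // above and below simply forward the fields of resp_0 (the cache/uncache response selected by
  // uncache_respond further down) to the LSU response output. The branch mask is intentionally absent
  // from this pass-through list because it is recomputed from io_lsu_brupdate by the branch-mask
  // helpers in util.scala (the @[util.scala:101:23] locators), per the usual BOOM UpdateBrMask pattern.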
assign io_lsu_resp_0_bits_out_uop_is_rvc = resp_0_bits_uop_is_rvc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_debug_pc = resp_0_bits_uop_debug_pc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_iq_type = resp_0_bits_uop_iq_type; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_fu_code = resp_0_bits_uop_fu_code; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_br_type = resp_0_bits_uop_ctrl_br_type; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_op1_sel = resp_0_bits_uop_ctrl_op1_sel; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_op2_sel = resp_0_bits_uop_ctrl_op2_sel; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_imm_sel = resp_0_bits_uop_ctrl_imm_sel; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_op_fcn = resp_0_bits_uop_ctrl_op_fcn; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_fcn_dw = resp_0_bits_uop_ctrl_fcn_dw; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_csr_cmd = resp_0_bits_uop_ctrl_csr_cmd; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_is_load = resp_0_bits_uop_ctrl_is_load; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_is_sta = resp_0_bits_uop_ctrl_is_sta; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ctrl_is_std = resp_0_bits_uop_ctrl_is_std; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_iw_state = resp_0_bits_uop_iw_state; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_iw_p1_poisoned = resp_0_bits_uop_iw_p1_poisoned; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_iw_p2_poisoned = resp_0_bits_uop_iw_p2_poisoned; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_br = resp_0_bits_uop_is_br; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_jalr = resp_0_bits_uop_is_jalr; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_jal = resp_0_bits_uop_is_jal; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_sfb = resp_0_bits_uop_is_sfb; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_br_tag = resp_0_bits_uop_br_tag; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ftq_idx = resp_0_bits_uop_ftq_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_edge_inst = resp_0_bits_uop_edge_inst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_pc_lob = resp_0_bits_uop_pc_lob; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_taken = resp_0_bits_uop_taken; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_imm_packed = resp_0_bits_uop_imm_packed; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_csr_addr = resp_0_bits_uop_csr_addr; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_rob_idx = resp_0_bits_uop_rob_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ldq_idx = resp_0_bits_uop_ldq_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_stq_idx = resp_0_bits_uop_stq_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_rxq_idx = resp_0_bits_uop_rxq_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_pdst = resp_0_bits_uop_pdst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_prs1 = resp_0_bits_uop_prs1; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_prs2 = resp_0_bits_uop_prs2; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_prs3 = resp_0_bits_uop_prs3; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ppred = resp_0_bits_uop_ppred; // @[util.scala:101:23] assign 
io_lsu_resp_0_bits_out_uop_prs1_busy = resp_0_bits_uop_prs1_busy; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_prs2_busy = resp_0_bits_uop_prs2_busy; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_prs3_busy = resp_0_bits_uop_prs3_busy; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ppred_busy = resp_0_bits_uop_ppred_busy; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_stale_pdst = resp_0_bits_uop_stale_pdst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_exception = resp_0_bits_uop_exception; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_exc_cause = resp_0_bits_uop_exc_cause; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_bypassable = resp_0_bits_uop_bypassable; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_mem_cmd = resp_0_bits_uop_mem_cmd; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_mem_size = resp_0_bits_uop_mem_size; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_mem_signed = resp_0_bits_uop_mem_signed; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_fence = resp_0_bits_uop_is_fence; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_fencei = resp_0_bits_uop_is_fencei; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_amo = resp_0_bits_uop_is_amo; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_uses_ldq = resp_0_bits_uop_uses_ldq; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_uses_stq = resp_0_bits_uop_uses_stq; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_sys_pc2epc = resp_0_bits_uop_is_sys_pc2epc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_is_unique = resp_0_bits_uop_is_unique; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_flush_on_commit = resp_0_bits_uop_flush_on_commit; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ldst_is_rs1 = resp_0_bits_uop_ldst_is_rs1; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ldst = resp_0_bits_uop_ldst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_lrs1 = resp_0_bits_uop_lrs1; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_lrs2 = resp_0_bits_uop_lrs2; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_lrs3 = resp_0_bits_uop_lrs3; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_ldst_val = resp_0_bits_uop_ldst_val; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_dst_rtype = resp_0_bits_uop_dst_rtype; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_lrs1_rtype = resp_0_bits_uop_lrs1_rtype; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_lrs2_rtype = resp_0_bits_uop_lrs2_rtype; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_frs3_en = resp_0_bits_uop_frs3_en; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_fp_val = resp_0_bits_uop_fp_val; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_fp_single = resp_0_bits_uop_fp_single; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_xcpt_pf_if = resp_0_bits_uop_xcpt_pf_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_xcpt_ae_if = resp_0_bits_uop_xcpt_ae_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_xcpt_ma_if = resp_0_bits_uop_xcpt_ma_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_bp_debug_if = resp_0_bits_uop_bp_debug_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_bp_xcpt_if = resp_0_bits_uop_bp_xcpt_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_debug_fsrc = resp_0_bits_uop_debug_fsrc; // 
@[util.scala:101:23] assign io_lsu_resp_0_bits_out_uop_debug_tsrc = resp_0_bits_uop_debug_tsrc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_data = resp_0_bits_data; // @[util.scala:101:23] assign io_lsu_resp_0_bits_out_is_hella = resp_0_bits_is_hella; // @[util.scala:101:23] wire [7:0] resp_0_bits_uop_br_mask; // @[dcache.scala:846:22] wire resp_0_valid; // @[dcache.scala:846:22] wire _uncache_respond_T = ~cache_resp_0_valid; // @[dcache.scala:833:26, :844:26, :849:27] wire uncache_respond = _uncache_respond_T; // @[dcache.scala:849:{27,48}] assign resp_0_valid = uncache_respond ? uncache_resp_valid : cache_resp_0_valid; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_uopc = uncache_respond ? uncache_resp_bits_uop_uopc : cache_resp_0_bits_uop_uopc; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_inst = uncache_respond ? uncache_resp_bits_uop_inst : cache_resp_0_bits_uop_inst; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_debug_inst = uncache_respond ? uncache_resp_bits_uop_debug_inst : cache_resp_0_bits_uop_debug_inst; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_rvc = uncache_respond ? uncache_resp_bits_uop_is_rvc : cache_resp_0_bits_uop_is_rvc; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_debug_pc = uncache_respond ? uncache_resp_bits_uop_debug_pc : cache_resp_0_bits_uop_debug_pc; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_iq_type = uncache_respond ? uncache_resp_bits_uop_iq_type : cache_resp_0_bits_uop_iq_type; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_fu_code = uncache_respond ? uncache_resp_bits_uop_fu_code : cache_resp_0_bits_uop_fu_code; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_br_type = uncache_respond ? uncache_resp_bits_uop_ctrl_br_type : cache_resp_0_bits_uop_ctrl_br_type; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_op1_sel = uncache_respond ? uncache_resp_bits_uop_ctrl_op1_sel : cache_resp_0_bits_uop_ctrl_op1_sel; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_op2_sel = uncache_respond ? uncache_resp_bits_uop_ctrl_op2_sel : cache_resp_0_bits_uop_ctrl_op2_sel; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_imm_sel = uncache_respond ? uncache_resp_bits_uop_ctrl_imm_sel : cache_resp_0_bits_uop_ctrl_imm_sel; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_op_fcn = uncache_respond ? uncache_resp_bits_uop_ctrl_op_fcn : cache_resp_0_bits_uop_ctrl_op_fcn; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_fcn_dw = uncache_respond ? uncache_resp_bits_uop_ctrl_fcn_dw : cache_resp_0_bits_uop_ctrl_fcn_dw; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_csr_cmd = uncache_respond ? uncache_resp_bits_uop_ctrl_csr_cmd : cache_resp_0_bits_uop_ctrl_csr_cmd; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_is_load = uncache_respond ? 
uncache_resp_bits_uop_ctrl_is_load : cache_resp_0_bits_uop_ctrl_is_load; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_is_sta = uncache_respond ? uncache_resp_bits_uop_ctrl_is_sta : cache_resp_0_bits_uop_ctrl_is_sta; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ctrl_is_std = uncache_respond ? uncache_resp_bits_uop_ctrl_is_std : cache_resp_0_bits_uop_ctrl_is_std; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_iw_state = uncache_respond ? uncache_resp_bits_uop_iw_state : cache_resp_0_bits_uop_iw_state; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_iw_p1_poisoned = uncache_respond ? uncache_resp_bits_uop_iw_p1_poisoned : cache_resp_0_bits_uop_iw_p1_poisoned; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_iw_p2_poisoned = uncache_respond ? uncache_resp_bits_uop_iw_p2_poisoned : cache_resp_0_bits_uop_iw_p2_poisoned; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_br = uncache_respond ? uncache_resp_bits_uop_is_br : cache_resp_0_bits_uop_is_br; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_jalr = uncache_respond ? uncache_resp_bits_uop_is_jalr : cache_resp_0_bits_uop_is_jalr; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_jal = uncache_respond ? uncache_resp_bits_uop_is_jal : cache_resp_0_bits_uop_is_jal; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_sfb = uncache_respond ? uncache_resp_bits_uop_is_sfb : cache_resp_0_bits_uop_is_sfb; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_br_mask = uncache_respond ? uncache_resp_bits_uop_br_mask : cache_resp_0_bits_uop_br_mask; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_br_tag = uncache_respond ? uncache_resp_bits_uop_br_tag : cache_resp_0_bits_uop_br_tag; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ftq_idx = uncache_respond ? uncache_resp_bits_uop_ftq_idx : cache_resp_0_bits_uop_ftq_idx; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_edge_inst = uncache_respond ? uncache_resp_bits_uop_edge_inst : cache_resp_0_bits_uop_edge_inst; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_pc_lob = uncache_respond ? uncache_resp_bits_uop_pc_lob : cache_resp_0_bits_uop_pc_lob; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_taken = uncache_respond ? uncache_resp_bits_uop_taken : cache_resp_0_bits_uop_taken; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_imm_packed = uncache_respond ? uncache_resp_bits_uop_imm_packed : cache_resp_0_bits_uop_imm_packed; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_csr_addr = uncache_respond ? uncache_resp_bits_uop_csr_addr : cache_resp_0_bits_uop_csr_addr; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_rob_idx = uncache_respond ? 
uncache_resp_bits_uop_rob_idx : cache_resp_0_bits_uop_rob_idx; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ldq_idx = uncache_respond ? uncache_resp_bits_uop_ldq_idx : cache_resp_0_bits_uop_ldq_idx; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_stq_idx = uncache_respond ? uncache_resp_bits_uop_stq_idx : cache_resp_0_bits_uop_stq_idx; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_rxq_idx = uncache_respond ? uncache_resp_bits_uop_rxq_idx : cache_resp_0_bits_uop_rxq_idx; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_pdst = uncache_respond ? uncache_resp_bits_uop_pdst : cache_resp_0_bits_uop_pdst; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_prs1 = uncache_respond ? uncache_resp_bits_uop_prs1 : cache_resp_0_bits_uop_prs1; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_prs2 = uncache_respond ? uncache_resp_bits_uop_prs2 : cache_resp_0_bits_uop_prs2; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_prs3 = uncache_respond ? uncache_resp_bits_uop_prs3 : cache_resp_0_bits_uop_prs3; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ppred = uncache_respond ? uncache_resp_bits_uop_ppred : cache_resp_0_bits_uop_ppred; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_prs1_busy = uncache_respond ? uncache_resp_bits_uop_prs1_busy : cache_resp_0_bits_uop_prs1_busy; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_prs2_busy = uncache_respond ? uncache_resp_bits_uop_prs2_busy : cache_resp_0_bits_uop_prs2_busy; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_prs3_busy = uncache_respond ? uncache_resp_bits_uop_prs3_busy : cache_resp_0_bits_uop_prs3_busy; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ppred_busy = uncache_respond ? uncache_resp_bits_uop_ppred_busy : cache_resp_0_bits_uop_ppred_busy; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_stale_pdst = uncache_respond ? uncache_resp_bits_uop_stale_pdst : cache_resp_0_bits_uop_stale_pdst; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_exception = uncache_respond ? uncache_resp_bits_uop_exception : cache_resp_0_bits_uop_exception; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_exc_cause = uncache_respond ? uncache_resp_bits_uop_exc_cause : cache_resp_0_bits_uop_exc_cause; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_bypassable = uncache_respond ? uncache_resp_bits_uop_bypassable : cache_resp_0_bits_uop_bypassable; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_mem_cmd = uncache_respond ? uncache_resp_bits_uop_mem_cmd : cache_resp_0_bits_uop_mem_cmd; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_mem_size = uncache_respond ? uncache_resp_bits_uop_mem_size : cache_resp_0_bits_uop_mem_size; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_mem_signed = uncache_respond ? 
uncache_resp_bits_uop_mem_signed : cache_resp_0_bits_uop_mem_signed; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_fence = uncache_respond ? uncache_resp_bits_uop_is_fence : cache_resp_0_bits_uop_is_fence; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_fencei = uncache_respond ? uncache_resp_bits_uop_is_fencei : cache_resp_0_bits_uop_is_fencei; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_amo = uncache_respond ? uncache_resp_bits_uop_is_amo : cache_resp_0_bits_uop_is_amo; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_uses_ldq = uncache_respond ? uncache_resp_bits_uop_uses_ldq : cache_resp_0_bits_uop_uses_ldq; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_uses_stq = uncache_respond ? uncache_resp_bits_uop_uses_stq : cache_resp_0_bits_uop_uses_stq; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_sys_pc2epc = uncache_respond ? uncache_resp_bits_uop_is_sys_pc2epc : cache_resp_0_bits_uop_is_sys_pc2epc; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_is_unique = uncache_respond ? uncache_resp_bits_uop_is_unique : cache_resp_0_bits_uop_is_unique; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_flush_on_commit = uncache_respond ? uncache_resp_bits_uop_flush_on_commit : cache_resp_0_bits_uop_flush_on_commit; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ldst_is_rs1 = uncache_respond ? uncache_resp_bits_uop_ldst_is_rs1 : cache_resp_0_bits_uop_ldst_is_rs1; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ldst = uncache_respond ? uncache_resp_bits_uop_ldst : cache_resp_0_bits_uop_ldst; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_lrs1 = uncache_respond ? uncache_resp_bits_uop_lrs1 : cache_resp_0_bits_uop_lrs1; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_lrs2 = uncache_respond ? uncache_resp_bits_uop_lrs2 : cache_resp_0_bits_uop_lrs2; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_lrs3 = uncache_respond ? uncache_resp_bits_uop_lrs3 : cache_resp_0_bits_uop_lrs3; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_ldst_val = uncache_respond ? uncache_resp_bits_uop_ldst_val : cache_resp_0_bits_uop_ldst_val; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_dst_rtype = uncache_respond ? uncache_resp_bits_uop_dst_rtype : cache_resp_0_bits_uop_dst_rtype; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_lrs1_rtype = uncache_respond ? uncache_resp_bits_uop_lrs1_rtype : cache_resp_0_bits_uop_lrs1_rtype; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_lrs2_rtype = uncache_respond ? uncache_resp_bits_uop_lrs2_rtype : cache_resp_0_bits_uop_lrs2_rtype; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_frs3_en = uncache_respond ? 
uncache_resp_bits_uop_frs3_en : cache_resp_0_bits_uop_frs3_en; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_fp_val = uncache_respond ? uncache_resp_bits_uop_fp_val : cache_resp_0_bits_uop_fp_val; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_fp_single = uncache_respond ? uncache_resp_bits_uop_fp_single : cache_resp_0_bits_uop_fp_single; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_xcpt_pf_if = uncache_respond ? uncache_resp_bits_uop_xcpt_pf_if : cache_resp_0_bits_uop_xcpt_pf_if; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_xcpt_ae_if = uncache_respond ? uncache_resp_bits_uop_xcpt_ae_if : cache_resp_0_bits_uop_xcpt_ae_if; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_xcpt_ma_if = uncache_respond ? uncache_resp_bits_uop_xcpt_ma_if : cache_resp_0_bits_uop_xcpt_ma_if; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_bp_debug_if = uncache_respond ? uncache_resp_bits_uop_bp_debug_if : cache_resp_0_bits_uop_bp_debug_if; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_bp_xcpt_if = uncache_respond ? uncache_resp_bits_uop_bp_xcpt_if : cache_resp_0_bits_uop_bp_xcpt_if; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_debug_fsrc = uncache_respond ? uncache_resp_bits_uop_debug_fsrc : cache_resp_0_bits_uop_debug_fsrc; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_uop_debug_tsrc = uncache_respond ? uncache_resp_bits_uop_debug_tsrc : cache_resp_0_bits_uop_debug_tsrc; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_data = uncache_respond ? uncache_resp_bits_data : cache_resp_0_bits_data; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] assign resp_0_bits_is_hella = uncache_respond ? 
uncache_resp_bits_is_hella : cache_resp_0_bits_is_hella; // @[dcache.scala:833:26, :841:26, :846:22, :849:48, :850:28, :851:15] wire _io_lsu_resp_0_valid_T = io_lsu_exception_0 & resp_0_bits_uop_uses_ldq; // @[dcache.scala:413:7, :846:22, :858:48] wire _io_lsu_resp_0_valid_T_1 = ~_io_lsu_resp_0_valid_T; // @[dcache.scala:858:{29,48}] wire _io_lsu_resp_0_valid_T_2 = resp_0_valid & _io_lsu_resp_0_valid_T_1; // @[dcache.scala:846:22, :857:43, :858:29] wire [7:0] _io_lsu_resp_0_valid_T_3 = io_lsu_brupdate_b1_mispredict_mask_0 & resp_0_bits_uop_br_mask; // @[util.scala:118:51] wire _io_lsu_resp_0_valid_T_4 = |_io_lsu_resp_0_valid_T_3; // @[util.scala:118:{51,59}] wire _io_lsu_resp_0_valid_T_5 = ~_io_lsu_resp_0_valid_T_4; // @[util.scala:118:59] assign _io_lsu_resp_0_valid_T_6 = _io_lsu_resp_0_valid_T_2 & _io_lsu_resp_0_valid_T_5; // @[dcache.scala:857:43, :858:78, :859:29] assign io_lsu_resp_0_valid_0 = _io_lsu_resp_0_valid_T_6; // @[dcache.scala:413:7, :858:78] assign io_lsu_resp_0_bits_uop_uopc_0 = io_lsu_resp_0_bits_out_uop_uopc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_inst_0 = io_lsu_resp_0_bits_out_uop_inst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_debug_inst_0 = io_lsu_resp_0_bits_out_uop_debug_inst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_rvc_0 = io_lsu_resp_0_bits_out_uop_is_rvc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_debug_pc_0 = io_lsu_resp_0_bits_out_uop_debug_pc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_iq_type_0 = io_lsu_resp_0_bits_out_uop_iq_type; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_fu_code_0 = io_lsu_resp_0_bits_out_uop_fu_code; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_br_type_0 = io_lsu_resp_0_bits_out_uop_ctrl_br_type; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_op1_sel_0 = io_lsu_resp_0_bits_out_uop_ctrl_op1_sel; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_op2_sel_0 = io_lsu_resp_0_bits_out_uop_ctrl_op2_sel; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_imm_sel_0 = io_lsu_resp_0_bits_out_uop_ctrl_imm_sel; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_op_fcn_0 = io_lsu_resp_0_bits_out_uop_ctrl_op_fcn; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_fcn_dw_0 = io_lsu_resp_0_bits_out_uop_ctrl_fcn_dw; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_csr_cmd_0 = io_lsu_resp_0_bits_out_uop_ctrl_csr_cmd; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_is_load_0 = io_lsu_resp_0_bits_out_uop_ctrl_is_load; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_is_sta_0 = io_lsu_resp_0_bits_out_uop_ctrl_is_sta; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ctrl_is_std_0 = io_lsu_resp_0_bits_out_uop_ctrl_is_std; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_iw_state_0 = io_lsu_resp_0_bits_out_uop_iw_state; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_iw_p1_poisoned_0 = io_lsu_resp_0_bits_out_uop_iw_p1_poisoned; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_iw_p2_poisoned_0 = io_lsu_resp_0_bits_out_uop_iw_p2_poisoned; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_br_0 = io_lsu_resp_0_bits_out_uop_is_br; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_jalr_0 = io_lsu_resp_0_bits_out_uop_is_jalr; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_jal_0 = io_lsu_resp_0_bits_out_uop_is_jal; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_sfb_0 = io_lsu_resp_0_bits_out_uop_is_sfb; // @[util.scala:101:23] wire [7:0] 
_io_lsu_resp_0_bits_out_uop_br_mask_T_1; // @[util.scala:89:21] assign io_lsu_resp_0_bits_uop_br_mask_0 = io_lsu_resp_0_bits_out_uop_br_mask; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_br_tag_0 = io_lsu_resp_0_bits_out_uop_br_tag; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ftq_idx_0 = io_lsu_resp_0_bits_out_uop_ftq_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_edge_inst_0 = io_lsu_resp_0_bits_out_uop_edge_inst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_pc_lob_0 = io_lsu_resp_0_bits_out_uop_pc_lob; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_taken_0 = io_lsu_resp_0_bits_out_uop_taken; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_imm_packed_0 = io_lsu_resp_0_bits_out_uop_imm_packed; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_csr_addr_0 = io_lsu_resp_0_bits_out_uop_csr_addr; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_rob_idx_0 = io_lsu_resp_0_bits_out_uop_rob_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ldq_idx_0 = io_lsu_resp_0_bits_out_uop_ldq_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_stq_idx_0 = io_lsu_resp_0_bits_out_uop_stq_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_rxq_idx_0 = io_lsu_resp_0_bits_out_uop_rxq_idx; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_pdst_0 = io_lsu_resp_0_bits_out_uop_pdst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_prs1_0 = io_lsu_resp_0_bits_out_uop_prs1; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_prs2_0 = io_lsu_resp_0_bits_out_uop_prs2; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_prs3_0 = io_lsu_resp_0_bits_out_uop_prs3; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ppred_0 = io_lsu_resp_0_bits_out_uop_ppred; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_prs1_busy_0 = io_lsu_resp_0_bits_out_uop_prs1_busy; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_prs2_busy_0 = io_lsu_resp_0_bits_out_uop_prs2_busy; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_prs3_busy_0 = io_lsu_resp_0_bits_out_uop_prs3_busy; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ppred_busy_0 = io_lsu_resp_0_bits_out_uop_ppred_busy; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_stale_pdst_0 = io_lsu_resp_0_bits_out_uop_stale_pdst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_exception_0 = io_lsu_resp_0_bits_out_uop_exception; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_exc_cause_0 = io_lsu_resp_0_bits_out_uop_exc_cause; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_bypassable_0 = io_lsu_resp_0_bits_out_uop_bypassable; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_mem_cmd_0 = io_lsu_resp_0_bits_out_uop_mem_cmd; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_mem_size_0 = io_lsu_resp_0_bits_out_uop_mem_size; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_mem_signed_0 = io_lsu_resp_0_bits_out_uop_mem_signed; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_fence_0 = io_lsu_resp_0_bits_out_uop_is_fence; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_fencei_0 = io_lsu_resp_0_bits_out_uop_is_fencei; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_amo_0 = io_lsu_resp_0_bits_out_uop_is_amo; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_uses_ldq_0 = io_lsu_resp_0_bits_out_uop_uses_ldq; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_uses_stq_0 = io_lsu_resp_0_bits_out_uop_uses_stq; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_sys_pc2epc_0 = io_lsu_resp_0_bits_out_uop_is_sys_pc2epc; // 
@[util.scala:101:23] assign io_lsu_resp_0_bits_uop_is_unique_0 = io_lsu_resp_0_bits_out_uop_is_unique; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_flush_on_commit_0 = io_lsu_resp_0_bits_out_uop_flush_on_commit; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ldst_is_rs1_0 = io_lsu_resp_0_bits_out_uop_ldst_is_rs1; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ldst_0 = io_lsu_resp_0_bits_out_uop_ldst; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_lrs1_0 = io_lsu_resp_0_bits_out_uop_lrs1; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_lrs2_0 = io_lsu_resp_0_bits_out_uop_lrs2; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_lrs3_0 = io_lsu_resp_0_bits_out_uop_lrs3; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_ldst_val_0 = io_lsu_resp_0_bits_out_uop_ldst_val; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_dst_rtype_0 = io_lsu_resp_0_bits_out_uop_dst_rtype; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_lrs1_rtype_0 = io_lsu_resp_0_bits_out_uop_lrs1_rtype; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_lrs2_rtype_0 = io_lsu_resp_0_bits_out_uop_lrs2_rtype; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_frs3_en_0 = io_lsu_resp_0_bits_out_uop_frs3_en; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_fp_val_0 = io_lsu_resp_0_bits_out_uop_fp_val; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_fp_single_0 = io_lsu_resp_0_bits_out_uop_fp_single; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_xcpt_pf_if_0 = io_lsu_resp_0_bits_out_uop_xcpt_pf_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_xcpt_ae_if_0 = io_lsu_resp_0_bits_out_uop_xcpt_ae_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_xcpt_ma_if_0 = io_lsu_resp_0_bits_out_uop_xcpt_ma_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_bp_debug_if_0 = io_lsu_resp_0_bits_out_uop_bp_debug_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_bp_xcpt_if_0 = io_lsu_resp_0_bits_out_uop_bp_xcpt_if; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_debug_fsrc_0 = io_lsu_resp_0_bits_out_uop_debug_fsrc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_uop_debug_tsrc_0 = io_lsu_resp_0_bits_out_uop_debug_tsrc; // @[util.scala:101:23] assign io_lsu_resp_0_bits_data_0 = io_lsu_resp_0_bits_out_data; // @[util.scala:101:23] assign io_lsu_resp_0_bits_is_hella_0 = io_lsu_resp_0_bits_out_is_hella; // @[util.scala:101:23] wire [7:0] _io_lsu_resp_0_bits_out_uop_br_mask_T = ~io_lsu_brupdate_b1_resolve_mask_0; // @[util.scala:85:27, :89:23] assign _io_lsu_resp_0_bits_out_uop_br_mask_T_1 = resp_0_bits_uop_br_mask & _io_lsu_resp_0_bits_out_uop_br_mask_T; // @[util.scala:89:{21,23}] assign io_lsu_resp_0_bits_out_uop_br_mask = _io_lsu_resp_0_bits_out_uop_br_mask_T_1; // @[util.scala:89:21, :101:23] wire _io_lsu_nack_0_valid_T = s2_valid_0 & s2_send_nack_0; // @[dcache.scala:427:49, :862:41] wire _io_lsu_nack_0_valid_T_2 = ~_io_lsu_nack_0_valid_T_1; // @[dcache.scala:863:{29,48}] wire _io_lsu_nack_0_valid_T_3 = _io_lsu_nack_0_valid_T & _io_lsu_nack_0_valid_T_2; // @[dcache.scala:862:{41,60}, :863:29] wire _io_lsu_nack_0_valid_T_5 = |_io_lsu_nack_0_valid_T_4; // @[util.scala:118:{51,59}] wire _io_lsu_nack_0_valid_T_6 = ~_io_lsu_nack_0_valid_T_5; // @[util.scala:118:59] assign _io_lsu_nack_0_valid_T_7 = _io_lsu_nack_0_valid_T_3 & _io_lsu_nack_0_valid_T_6; // @[dcache.scala:862:60, :863:75, :864:29] assign io_lsu_nack_0_valid_0 = _io_lsu_nack_0_valid_T_7; // @[dcache.scala:413:7, :863:75] assign io_lsu_nack_0_bits_uop_uopc_0 = 
io_lsu_nack_0_bits_out_uop_uopc; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_inst_0 = io_lsu_nack_0_bits_out_uop_inst; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_debug_inst_0 = io_lsu_nack_0_bits_out_uop_debug_inst; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_rvc_0 = io_lsu_nack_0_bits_out_uop_is_rvc; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_debug_pc_0 = io_lsu_nack_0_bits_out_uop_debug_pc; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_iq_type_0 = io_lsu_nack_0_bits_out_uop_iq_type; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_fu_code_0 = io_lsu_nack_0_bits_out_uop_fu_code; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_br_type_0 = io_lsu_nack_0_bits_out_uop_ctrl_br_type; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_op1_sel_0 = io_lsu_nack_0_bits_out_uop_ctrl_op1_sel; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_op2_sel_0 = io_lsu_nack_0_bits_out_uop_ctrl_op2_sel; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_imm_sel_0 = io_lsu_nack_0_bits_out_uop_ctrl_imm_sel; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_op_fcn_0 = io_lsu_nack_0_bits_out_uop_ctrl_op_fcn; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_fcn_dw_0 = io_lsu_nack_0_bits_out_uop_ctrl_fcn_dw; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_csr_cmd_0 = io_lsu_nack_0_bits_out_uop_ctrl_csr_cmd; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_is_load_0 = io_lsu_nack_0_bits_out_uop_ctrl_is_load; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_is_sta_0 = io_lsu_nack_0_bits_out_uop_ctrl_is_sta; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ctrl_is_std_0 = io_lsu_nack_0_bits_out_uop_ctrl_is_std; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_iw_state_0 = io_lsu_nack_0_bits_out_uop_iw_state; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_iw_p1_poisoned_0 = io_lsu_nack_0_bits_out_uop_iw_p1_poisoned; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_iw_p2_poisoned_0 = io_lsu_nack_0_bits_out_uop_iw_p2_poisoned; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_br_0 = io_lsu_nack_0_bits_out_uop_is_br; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_jalr_0 = io_lsu_nack_0_bits_out_uop_is_jalr; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_jal_0 = io_lsu_nack_0_bits_out_uop_is_jal; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_sfb_0 = io_lsu_nack_0_bits_out_uop_is_sfb; // @[util.scala:101:23] wire [7:0] _io_lsu_nack_0_bits_out_uop_br_mask_T_1; // @[util.scala:89:21] assign io_lsu_nack_0_bits_uop_br_mask_0 = io_lsu_nack_0_bits_out_uop_br_mask; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_br_tag_0 = io_lsu_nack_0_bits_out_uop_br_tag; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ftq_idx_0 = io_lsu_nack_0_bits_out_uop_ftq_idx; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_edge_inst_0 = io_lsu_nack_0_bits_out_uop_edge_inst; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_pc_lob_0 = io_lsu_nack_0_bits_out_uop_pc_lob; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_taken_0 = io_lsu_nack_0_bits_out_uop_taken; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_imm_packed_0 = io_lsu_nack_0_bits_out_uop_imm_packed; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_csr_addr_0 = io_lsu_nack_0_bits_out_uop_csr_addr; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_rob_idx_0 = io_lsu_nack_0_bits_out_uop_rob_idx; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ldq_idx_0 
= io_lsu_nack_0_bits_out_uop_ldq_idx; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_stq_idx_0 = io_lsu_nack_0_bits_out_uop_stq_idx; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_rxq_idx_0 = io_lsu_nack_0_bits_out_uop_rxq_idx; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_pdst_0 = io_lsu_nack_0_bits_out_uop_pdst; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_prs1_0 = io_lsu_nack_0_bits_out_uop_prs1; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_prs2_0 = io_lsu_nack_0_bits_out_uop_prs2; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_prs3_0 = io_lsu_nack_0_bits_out_uop_prs3; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ppred_0 = io_lsu_nack_0_bits_out_uop_ppred; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_prs1_busy_0 = io_lsu_nack_0_bits_out_uop_prs1_busy; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_prs2_busy_0 = io_lsu_nack_0_bits_out_uop_prs2_busy; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_prs3_busy_0 = io_lsu_nack_0_bits_out_uop_prs3_busy; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ppred_busy_0 = io_lsu_nack_0_bits_out_uop_ppred_busy; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_stale_pdst_0 = io_lsu_nack_0_bits_out_uop_stale_pdst; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_exception_0 = io_lsu_nack_0_bits_out_uop_exception; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_exc_cause_0 = io_lsu_nack_0_bits_out_uop_exc_cause; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_bypassable_0 = io_lsu_nack_0_bits_out_uop_bypassable; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_mem_cmd_0 = io_lsu_nack_0_bits_out_uop_mem_cmd; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_mem_size_0 = io_lsu_nack_0_bits_out_uop_mem_size; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_mem_signed_0 = io_lsu_nack_0_bits_out_uop_mem_signed; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_fence_0 = io_lsu_nack_0_bits_out_uop_is_fence; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_fencei_0 = io_lsu_nack_0_bits_out_uop_is_fencei; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_amo_0 = io_lsu_nack_0_bits_out_uop_is_amo; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_uses_ldq_0 = io_lsu_nack_0_bits_out_uop_uses_ldq; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_uses_stq_0 = io_lsu_nack_0_bits_out_uop_uses_stq; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_sys_pc2epc_0 = io_lsu_nack_0_bits_out_uop_is_sys_pc2epc; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_is_unique_0 = io_lsu_nack_0_bits_out_uop_is_unique; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_flush_on_commit_0 = io_lsu_nack_0_bits_out_uop_flush_on_commit; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ldst_is_rs1_0 = io_lsu_nack_0_bits_out_uop_ldst_is_rs1; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ldst_0 = io_lsu_nack_0_bits_out_uop_ldst; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_lrs1_0 = io_lsu_nack_0_bits_out_uop_lrs1; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_lrs2_0 = io_lsu_nack_0_bits_out_uop_lrs2; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_lrs3_0 = io_lsu_nack_0_bits_out_uop_lrs3; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_ldst_val_0 = io_lsu_nack_0_bits_out_uop_ldst_val; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_dst_rtype_0 = io_lsu_nack_0_bits_out_uop_dst_rtype; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_lrs1_rtype_0 = 
io_lsu_nack_0_bits_out_uop_lrs1_rtype; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_lrs2_rtype_0 = io_lsu_nack_0_bits_out_uop_lrs2_rtype; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_frs3_en_0 = io_lsu_nack_0_bits_out_uop_frs3_en; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_fp_val_0 = io_lsu_nack_0_bits_out_uop_fp_val; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_fp_single_0 = io_lsu_nack_0_bits_out_uop_fp_single; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_xcpt_pf_if_0 = io_lsu_nack_0_bits_out_uop_xcpt_pf_if; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_xcpt_ae_if_0 = io_lsu_nack_0_bits_out_uop_xcpt_ae_if; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_xcpt_ma_if_0 = io_lsu_nack_0_bits_out_uop_xcpt_ma_if; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_bp_debug_if_0 = io_lsu_nack_0_bits_out_uop_bp_debug_if; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_bp_xcpt_if_0 = io_lsu_nack_0_bits_out_uop_bp_xcpt_if; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_debug_fsrc_0 = io_lsu_nack_0_bits_out_uop_debug_fsrc; // @[util.scala:101:23] assign io_lsu_nack_0_bits_uop_debug_tsrc_0 = io_lsu_nack_0_bits_out_uop_debug_tsrc; // @[util.scala:101:23] assign io_lsu_nack_0_bits_addr_0 = io_lsu_nack_0_bits_out_addr; // @[util.scala:101:23] assign io_lsu_nack_0_bits_data_0 = io_lsu_nack_0_bits_out_data; // @[util.scala:101:23] assign io_lsu_nack_0_bits_is_hella_0 = io_lsu_nack_0_bits_out_is_hella; // @[util.scala:101:23] wire [7:0] _io_lsu_nack_0_bits_out_uop_br_mask_T = ~io_lsu_brupdate_b1_resolve_mask_0; // @[util.scala:85:27, :89:23] assign _io_lsu_nack_0_bits_out_uop_br_mask_T_1 = s2_req_0_uop_br_mask & _io_lsu_nack_0_bits_out_uop_br_mask_T; // @[util.scala:89:{21,23}] assign io_lsu_nack_0_bits_out_uop_br_mask = _io_lsu_nack_0_bits_out_uop_br_mask_T_1; // @[util.scala:89:21, :101:23]
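The per-field selection logic above is one long 2:1 mux chain: every resp_0_bits_* output picks between the uncached and cached response with the same uncache_respond select (itself just ~cache_resp_0_valid). In Chisel source, a chain like this typically comes from a single Mux (or an equivalent when block) over a whole response Bundle, which lowering then expands into the per-signal assigns emitted here. A minimal, self-contained sketch of that pattern, with illustrative bundle and port names rather than the actual BOOM dcache interface:

import chisel3._
import chisel3.util._

// Illustrative response payload; the real BOOM response also carries a full MicroOp.
class DemoResp extends Bundle {
  val data     = UInt(64.W)
  val is_hella = Bool()
}

class RespSelect extends Module {
  val io = IO(new Bundle {
    val cache_resp   = Flipped(Valid(new DemoResp))
    val uncache_resp = Flipped(Valid(new DemoResp))
    val resp         = Valid(new DemoResp)
  })
  // Mirrors uncache_respond = ~cache_resp_0_valid in the generated code above:
  // the uncached path only wins when the cache has nothing to return.
  val uncache_respond = !io.cache_resp.valid
  io.resp.valid := Mux(uncache_respond, io.uncache_resp.valid, io.cache_resp.valid)
  // One Mux over the Bundle; lowering expands it into per-field 2:1 muxes.
  io.resp.bits  := Mux(uncache_respond, io.uncache_resp.bits, io.cache_resp.bits)
}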
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } }
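The module that follows is one elaborated instance of the BranchKillableQueue class defined above (its port list mirrors the class's io bundle, with the MicroOp flattened into individual fields). Separately, the small wrapping-index helpers in this file take no implicit Parameters and can be tried in isolation; a minimal, self-contained sketch with illustrative module and port names:

import chisel3._
import boom.v3.util.{WrapInc, WrapDec, IsOlder}

class PointerDemo extends Module {
  val io = IO(new Bundle {
    val advance       = Input(Bool())
    val head          = Input(UInt(3.W))
    val prev_is_older = Output(Bool())
  })
  // A 6-entry queue is not a power of two, so WrapInc/WrapDec fold the
  // wrap-around explicitly (5 -> 0 and 0 -> 5) instead of relying on truncation.
  val ptr = RegInit(0.U(3.W))
  when (io.advance) { ptr := WrapInc(ptr, 6) }
  val prev = WrapDec(ptr, 6)
  // Compare two wrapping indices for age relative to the current head pointer.
  io.prev_is_older := IsOlder(prev, ptr, io.head)
}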
module BranchKillableQueue_2( // @[util.scala:448:7] input clock, // @[util.scala:448:7] input reset, // @[util.scala:448:7] output io_enq_ready, // @[util.scala:453:14] input io_enq_valid, // @[util.scala:453:14] input [6:0] io_enq_bits_uop_uopc, // @[util.scala:453:14] input [31:0] io_enq_bits_uop_inst, // @[util.scala:453:14] input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:453:14] input io_enq_bits_uop_is_rvc, // @[util.scala:453:14] input [39:0] io_enq_bits_uop_debug_pc, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_iq_type, // @[util.scala:453:14] input [9:0] io_enq_bits_uop_fu_code, // @[util.scala:453:14] input [3:0] io_enq_bits_uop_ctrl_br_type, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_ctrl_op1_sel, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_ctrl_op2_sel, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_ctrl_imm_sel, // @[util.scala:453:14] input [4:0] io_enq_bits_uop_ctrl_op_fcn, // @[util.scala:453:14] input io_enq_bits_uop_ctrl_fcn_dw, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_ctrl_csr_cmd, // @[util.scala:453:14] input io_enq_bits_uop_ctrl_is_load, // @[util.scala:453:14] input io_enq_bits_uop_ctrl_is_sta, // @[util.scala:453:14] input io_enq_bits_uop_ctrl_is_std, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_iw_state, // @[util.scala:453:14] input io_enq_bits_uop_iw_p1_poisoned, // @[util.scala:453:14] input io_enq_bits_uop_iw_p2_poisoned, // @[util.scala:453:14] input io_enq_bits_uop_is_br, // @[util.scala:453:14] input io_enq_bits_uop_is_jalr, // @[util.scala:453:14] input io_enq_bits_uop_is_jal, // @[util.scala:453:14] input io_enq_bits_uop_is_sfb, // @[util.scala:453:14] input [7:0] io_enq_bits_uop_br_mask, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_br_tag, // @[util.scala:453:14] input [3:0] io_enq_bits_uop_ftq_idx, // @[util.scala:453:14] input io_enq_bits_uop_edge_inst, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:453:14] input io_enq_bits_uop_taken, // @[util.scala:453:14] input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:453:14] input [11:0] io_enq_bits_uop_csr_addr, // @[util.scala:453:14] input [4:0] io_enq_bits_uop_rob_idx, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_ldq_idx, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_stq_idx, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_pdst, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_prs1, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_prs2, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_prs3, // @[util.scala:453:14] input [3:0] io_enq_bits_uop_ppred, // @[util.scala:453:14] input io_enq_bits_uop_prs1_busy, // @[util.scala:453:14] input io_enq_bits_uop_prs2_busy, // @[util.scala:453:14] input io_enq_bits_uop_prs3_busy, // @[util.scala:453:14] input io_enq_bits_uop_ppred_busy, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_stale_pdst, // @[util.scala:453:14] input io_enq_bits_uop_exception, // @[util.scala:453:14] input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:453:14] input io_enq_bits_uop_bypassable, // @[util.scala:453:14] input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:453:14] input io_enq_bits_uop_mem_signed, // @[util.scala:453:14] input io_enq_bits_uop_is_fence, // @[util.scala:453:14] input io_enq_bits_uop_is_fencei, // @[util.scala:453:14] input io_enq_bits_uop_is_amo, // @[util.scala:453:14] input io_enq_bits_uop_uses_ldq, // 
@[util.scala:453:14] input io_enq_bits_uop_uses_stq, // @[util.scala:453:14] input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:453:14] input io_enq_bits_uop_is_unique, // @[util.scala:453:14] input io_enq_bits_uop_flush_on_commit, // @[util.scala:453:14] input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_ldst, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:453:14] input io_enq_bits_uop_ldst_val, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:453:14] input io_enq_bits_uop_frs3_en, // @[util.scala:453:14] input io_enq_bits_uop_fp_val, // @[util.scala:453:14] input io_enq_bits_uop_fp_single, // @[util.scala:453:14] input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:453:14] input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:453:14] input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:453:14] input io_enq_bits_uop_bp_debug_if, // @[util.scala:453:14] input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:453:14] input [63:0] io_enq_bits_data, // @[util.scala:453:14] input io_enq_bits_is_hella, // @[util.scala:453:14] input io_deq_ready, // @[util.scala:453:14] output io_deq_valid, // @[util.scala:453:14] output [6:0] io_deq_bits_uop_uopc, // @[util.scala:453:14] output [31:0] io_deq_bits_uop_inst, // @[util.scala:453:14] output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:453:14] output io_deq_bits_uop_is_rvc, // @[util.scala:453:14] output [39:0] io_deq_bits_uop_debug_pc, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_iq_type, // @[util.scala:453:14] output [9:0] io_deq_bits_uop_fu_code, // @[util.scala:453:14] output [3:0] io_deq_bits_uop_ctrl_br_type, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_ctrl_op1_sel, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_ctrl_op2_sel, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_ctrl_imm_sel, // @[util.scala:453:14] output [4:0] io_deq_bits_uop_ctrl_op_fcn, // @[util.scala:453:14] output io_deq_bits_uop_ctrl_fcn_dw, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_ctrl_csr_cmd, // @[util.scala:453:14] output io_deq_bits_uop_ctrl_is_load, // @[util.scala:453:14] output io_deq_bits_uop_ctrl_is_sta, // @[util.scala:453:14] output io_deq_bits_uop_ctrl_is_std, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_iw_state, // @[util.scala:453:14] output io_deq_bits_uop_iw_p1_poisoned, // @[util.scala:453:14] output io_deq_bits_uop_iw_p2_poisoned, // @[util.scala:453:14] output io_deq_bits_uop_is_br, // @[util.scala:453:14] output io_deq_bits_uop_is_jalr, // @[util.scala:453:14] output io_deq_bits_uop_is_jal, // @[util.scala:453:14] output io_deq_bits_uop_is_sfb, // @[util.scala:453:14] output [7:0] io_deq_bits_uop_br_mask, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_br_tag, // @[util.scala:453:14] output [3:0] io_deq_bits_uop_ftq_idx, // @[util.scala:453:14] output io_deq_bits_uop_edge_inst, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:453:14] output io_deq_bits_uop_taken, // @[util.scala:453:14] output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:453:14] output [11:0] io_deq_bits_uop_csr_addr, // @[util.scala:453:14] output 
[4:0] io_deq_bits_uop_rob_idx, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_ldq_idx, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_stq_idx, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_pdst, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_prs1, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_prs2, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_prs3, // @[util.scala:453:14] output [3:0] io_deq_bits_uop_ppred, // @[util.scala:453:14] output io_deq_bits_uop_prs1_busy, // @[util.scala:453:14] output io_deq_bits_uop_prs2_busy, // @[util.scala:453:14] output io_deq_bits_uop_prs3_busy, // @[util.scala:453:14] output io_deq_bits_uop_ppred_busy, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_stale_pdst, // @[util.scala:453:14] output io_deq_bits_uop_exception, // @[util.scala:453:14] output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:453:14] output io_deq_bits_uop_bypassable, // @[util.scala:453:14] output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:453:14] output io_deq_bits_uop_mem_signed, // @[util.scala:453:14] output io_deq_bits_uop_is_fence, // @[util.scala:453:14] output io_deq_bits_uop_is_fencei, // @[util.scala:453:14] output io_deq_bits_uop_is_amo, // @[util.scala:453:14] output io_deq_bits_uop_uses_ldq, // @[util.scala:453:14] output io_deq_bits_uop_uses_stq, // @[util.scala:453:14] output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:453:14] output io_deq_bits_uop_is_unique, // @[util.scala:453:14] output io_deq_bits_uop_flush_on_commit, // @[util.scala:453:14] output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_ldst, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:453:14] output io_deq_bits_uop_ldst_val, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:453:14] output io_deq_bits_uop_frs3_en, // @[util.scala:453:14] output io_deq_bits_uop_fp_val, // @[util.scala:453:14] output io_deq_bits_uop_fp_single, // @[util.scala:453:14] output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:453:14] output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:453:14] output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:453:14] output io_deq_bits_uop_bp_debug_if, // @[util.scala:453:14] output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:453:14] output [63:0] io_deq_bits_data, // @[util.scala:453:14] output io_deq_bits_is_hella, // @[util.scala:453:14] input [7:0] io_brupdate_b1_resolve_mask, // @[util.scala:453:14] input [7:0] io_brupdate_b1_mispredict_mask, // @[util.scala:453:14] input [6:0] io_brupdate_b2_uop_uopc, // @[util.scala:453:14] input [31:0] io_brupdate_b2_uop_inst, // @[util.scala:453:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[util.scala:453:14] input io_brupdate_b2_uop_is_rvc, // @[util.scala:453:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[util.scala:453:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[util.scala:453:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // 
@[util.scala:453:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[util.scala:453:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[util.scala:453:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[util.scala:453:14] input io_brupdate_b2_uop_ctrl_is_load, // @[util.scala:453:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[util.scala:453:14] input io_brupdate_b2_uop_ctrl_is_std, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[util.scala:453:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[util.scala:453:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[util.scala:453:14] input io_brupdate_b2_uop_is_br, // @[util.scala:453:14] input io_brupdate_b2_uop_is_jalr, // @[util.scala:453:14] input io_brupdate_b2_uop_is_jal, // @[util.scala:453:14] input io_brupdate_b2_uop_is_sfb, // @[util.scala:453:14] input [7:0] io_brupdate_b2_uop_br_mask, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_br_tag, // @[util.scala:453:14] input [3:0] io_brupdate_b2_uop_ftq_idx, // @[util.scala:453:14] input io_brupdate_b2_uop_edge_inst, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[util.scala:453:14] input io_brupdate_b2_uop_taken, // @[util.scala:453:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[util.scala:453:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[util.scala:453:14] input [4:0] io_brupdate_b2_uop_rob_idx, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_ldq_idx, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_stq_idx, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_pdst, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_prs1, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_prs2, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_prs3, // @[util.scala:453:14] input [3:0] io_brupdate_b2_uop_ppred, // @[util.scala:453:14] input io_brupdate_b2_uop_prs1_busy, // @[util.scala:453:14] input io_brupdate_b2_uop_prs2_busy, // @[util.scala:453:14] input io_brupdate_b2_uop_prs3_busy, // @[util.scala:453:14] input io_brupdate_b2_uop_ppred_busy, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_stale_pdst, // @[util.scala:453:14] input io_brupdate_b2_uop_exception, // @[util.scala:453:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[util.scala:453:14] input io_brupdate_b2_uop_bypassable, // @[util.scala:453:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[util.scala:453:14] input io_brupdate_b2_uop_mem_signed, // @[util.scala:453:14] input io_brupdate_b2_uop_is_fence, // @[util.scala:453:14] input io_brupdate_b2_uop_is_fencei, // @[util.scala:453:14] input io_brupdate_b2_uop_is_amo, // @[util.scala:453:14] input io_brupdate_b2_uop_uses_ldq, // @[util.scala:453:14] input io_brupdate_b2_uop_uses_stq, // @[util.scala:453:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[util.scala:453:14] input io_brupdate_b2_uop_is_unique, // @[util.scala:453:14] input io_brupdate_b2_uop_flush_on_commit, // @[util.scala:453:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_ldst, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_lrs3, 
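  // These io_brupdate_b2_uop_* ports echo the resolving branch's micro-op; the
  // b1 resolve/mispredict masks above are what the per-entry kill logic later in
  // this module ANDs against each stored br_mask.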
// @[util.scala:453:14] input io_brupdate_b2_uop_ldst_val, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[util.scala:453:14] input io_brupdate_b2_uop_frs3_en, // @[util.scala:453:14] input io_brupdate_b2_uop_fp_val, // @[util.scala:453:14] input io_brupdate_b2_uop_fp_single, // @[util.scala:453:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[util.scala:453:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[util.scala:453:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[util.scala:453:14] input io_brupdate_b2_uop_bp_debug_if, // @[util.scala:453:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[util.scala:453:14] input io_brupdate_b2_valid, // @[util.scala:453:14] input io_brupdate_b2_mispredict, // @[util.scala:453:14] input io_brupdate_b2_taken, // @[util.scala:453:14] input [2:0] io_brupdate_b2_cfi_type, // @[util.scala:453:14] input [1:0] io_brupdate_b2_pc_sel, // @[util.scala:453:14] input [39:0] io_brupdate_b2_jalr_target, // @[util.scala:453:14] input [20:0] io_brupdate_b2_target_offset, // @[util.scala:453:14] input io_flush // @[util.scala:453:14] ); wire [64:0] _ram_ext_R0_data; // @[util.scala:464:20] wire io_enq_valid_0 = io_enq_valid; // @[util.scala:448:7] wire [6:0] io_enq_bits_uop_uopc_0 = io_enq_bits_uop_uopc; // @[util.scala:448:7] wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:448:7] wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:448:7] wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:448:7] wire [39:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_iq_type_0 = io_enq_bits_uop_iq_type; // @[util.scala:448:7] wire [9:0] io_enq_bits_uop_fu_code_0 = io_enq_bits_uop_fu_code; // @[util.scala:448:7] wire [3:0] io_enq_bits_uop_ctrl_br_type_0 = io_enq_bits_uop_ctrl_br_type; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_ctrl_op1_sel_0 = io_enq_bits_uop_ctrl_op1_sel; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_ctrl_op2_sel_0 = io_enq_bits_uop_ctrl_op2_sel; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_ctrl_imm_sel_0 = io_enq_bits_uop_ctrl_imm_sel; // @[util.scala:448:7] wire [4:0] io_enq_bits_uop_ctrl_op_fcn_0 = io_enq_bits_uop_ctrl_op_fcn; // @[util.scala:448:7] wire io_enq_bits_uop_ctrl_fcn_dw_0 = io_enq_bits_uop_ctrl_fcn_dw; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_ctrl_csr_cmd_0 = io_enq_bits_uop_ctrl_csr_cmd; // @[util.scala:448:7] wire io_enq_bits_uop_ctrl_is_load_0 = io_enq_bits_uop_ctrl_is_load; // @[util.scala:448:7] wire io_enq_bits_uop_ctrl_is_sta_0 = io_enq_bits_uop_ctrl_is_sta; // @[util.scala:448:7] wire io_enq_bits_uop_ctrl_is_std_0 = io_enq_bits_uop_ctrl_is_std; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_iw_state_0 = io_enq_bits_uop_iw_state; // @[util.scala:448:7] wire io_enq_bits_uop_iw_p1_poisoned_0 = io_enq_bits_uop_iw_p1_poisoned; // @[util.scala:448:7] wire io_enq_bits_uop_iw_p2_poisoned_0 = io_enq_bits_uop_iw_p2_poisoned; // @[util.scala:448:7] wire io_enq_bits_uop_is_br_0 = io_enq_bits_uop_is_br; // @[util.scala:448:7] wire io_enq_bits_uop_is_jalr_0 = io_enq_bits_uop_is_jalr; // @[util.scala:448:7] wire io_enq_bits_uop_is_jal_0 = io_enq_bits_uop_is_jal; // @[util.scala:448:7] wire io_enq_bits_uop_is_sfb_0 = 
io_enq_bits_uop_is_sfb; // @[util.scala:448:7] wire [7:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:448:7] wire [3:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:448:7] wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:448:7] wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:448:7] wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:448:7] wire [11:0] io_enq_bits_uop_csr_addr_0 = io_enq_bits_uop_csr_addr; // @[util.scala:448:7] wire [4:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:448:7] wire [3:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:448:7] wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:448:7] wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:448:7] wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:448:7] wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:448:7] wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:448:7] wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:448:7] wire io_enq_bits_uop_bypassable_0 = io_enq_bits_uop_bypassable; // @[util.scala:448:7] wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:448:7] wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:448:7] wire io_enq_bits_uop_is_fence_0 = io_enq_bits_uop_is_fence; // @[util.scala:448:7] wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:448:7] wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:448:7] wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:448:7] wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:448:7] wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:448:7] wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:448:7] wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:448:7] wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:448:7] 
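  // The *_0 wires in this region are plain aliases of the module inputs
  // (wire x_0 = x;) emitted by the Chisel/FIRRTL toolchain; they add no logic,
  // and the rest of the module references the *_0 names rather than the ports.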
wire io_enq_bits_uop_ldst_val_0 = io_enq_bits_uop_ldst_val; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:448:7] wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:448:7] wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:448:7] wire io_enq_bits_uop_fp_single_0 = io_enq_bits_uop_fp_single; // @[util.scala:448:7] wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:448:7] wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:448:7] wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // @[util.scala:448:7] wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:448:7] wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:448:7] wire [63:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:448:7] wire io_enq_bits_is_hella_0 = io_enq_bits_is_hella; // @[util.scala:448:7] wire io_deq_ready_0 = io_deq_ready; // @[util.scala:448:7] wire [7:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[util.scala:448:7] wire [7:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[util.scala:448:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[util.scala:448:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[util.scala:448:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[util.scala:448:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[util.scala:448:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[util.scala:448:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[util.scala:448:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[util.scala:448:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[util.scala:448:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[util.scala:448:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[util.scala:448:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[util.scala:448:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[util.scala:448:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // 
@[util.scala:448:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[util.scala:448:7] wire [7:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[util.scala:448:7] wire [3:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[util.scala:448:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[util.scala:448:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[util.scala:448:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[util.scala:448:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[util.scala:448:7] wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[util.scala:448:7] wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[util.scala:448:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[util.scala:448:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[util.scala:448:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[util.scala:448:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[util.scala:448:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[util.scala:448:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[util.scala:448:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[util.scala:448:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[util.scala:448:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[util.scala:448:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[util.scala:448:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // 
@[util.scala:448:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[util.scala:448:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[util.scala:448:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[util.scala:448:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[util.scala:448:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[util.scala:448:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[util.scala:448:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[util.scala:448:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[util.scala:448:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[util.scala:448:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[util.scala:448:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[util.scala:448:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[util.scala:448:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[util.scala:448:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[util.scala:448:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[util.scala:448:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[util.scala:448:7] wire io_flush_0 = io_flush; // @[util.scala:448:7] wire _valids_WIRE_0 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_1 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_2 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_3 = 1'h0; // @[util.scala:465:32] wire _io_enq_ready_T; // @[util.scala:504:19] wire _io_deq_valid_T_8; // @[util.scala:509:108] wire [6:0] out_uop_uopc; // @[util.scala:506:17] wire [31:0] out_uop_inst; // @[util.scala:506:17] wire [31:0] out_uop_debug_inst; // @[util.scala:506:17] wire out_uop_is_rvc; // @[util.scala:506:17] wire [39:0] out_uop_debug_pc; // @[util.scala:506:17] wire [2:0] out_uop_iq_type; // @[util.scala:506:17] wire [9:0] out_uop_fu_code; // @[util.scala:506:17] wire [3:0] out_uop_ctrl_br_type; // @[util.scala:506:17] wire [1:0] out_uop_ctrl_op1_sel; // @[util.scala:506:17] wire [2:0] out_uop_ctrl_op2_sel; // @[util.scala:506:17] wire [2:0] out_uop_ctrl_imm_sel; // @[util.scala:506:17] wire [4:0] out_uop_ctrl_op_fcn; // @[util.scala:506:17] wire out_uop_ctrl_fcn_dw; // @[util.scala:506:17] wire [2:0] out_uop_ctrl_csr_cmd; 
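  // The out_* wires are the dequeue-side view of the selected entry: out_data and
  // out_is_hella are unpacked from the 65-bit external RAM read port
  // (_ram_ext_R0_data), and the remaining micro-op fields are muxed out of the
  // uops_0..uops_3 registers by deq_ptr_value further down.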
// @[util.scala:506:17] wire out_uop_ctrl_is_load; // @[util.scala:506:17] wire out_uop_ctrl_is_sta; // @[util.scala:506:17] wire out_uop_ctrl_is_std; // @[util.scala:506:17] wire [1:0] out_uop_iw_state; // @[util.scala:506:17] wire out_uop_iw_p1_poisoned; // @[util.scala:506:17] wire out_uop_iw_p2_poisoned; // @[util.scala:506:17] wire out_uop_is_br; // @[util.scala:506:17] wire out_uop_is_jalr; // @[util.scala:506:17] wire out_uop_is_jal; // @[util.scala:506:17] wire out_uop_is_sfb; // @[util.scala:506:17] wire [7:0] _io_deq_bits_uop_br_mask_T_1; // @[util.scala:85:25] wire [2:0] out_uop_br_tag; // @[util.scala:506:17] wire [3:0] out_uop_ftq_idx; // @[util.scala:506:17] wire out_uop_edge_inst; // @[util.scala:506:17] wire [5:0] out_uop_pc_lob; // @[util.scala:506:17] wire out_uop_taken; // @[util.scala:506:17] wire [19:0] out_uop_imm_packed; // @[util.scala:506:17] wire [11:0] out_uop_csr_addr; // @[util.scala:506:17] wire [4:0] out_uop_rob_idx; // @[util.scala:506:17] wire [2:0] out_uop_ldq_idx; // @[util.scala:506:17] wire [2:0] out_uop_stq_idx; // @[util.scala:506:17] wire [1:0] out_uop_rxq_idx; // @[util.scala:506:17] wire [5:0] out_uop_pdst; // @[util.scala:506:17] wire [5:0] out_uop_prs1; // @[util.scala:506:17] wire [5:0] out_uop_prs2; // @[util.scala:506:17] wire [5:0] out_uop_prs3; // @[util.scala:506:17] wire [3:0] out_uop_ppred; // @[util.scala:506:17] wire out_uop_prs1_busy; // @[util.scala:506:17] wire out_uop_prs2_busy; // @[util.scala:506:17] wire out_uop_prs3_busy; // @[util.scala:506:17] wire out_uop_ppred_busy; // @[util.scala:506:17] wire [5:0] out_uop_stale_pdst; // @[util.scala:506:17] wire out_uop_exception; // @[util.scala:506:17] wire [63:0] out_uop_exc_cause; // @[util.scala:506:17] wire out_uop_bypassable; // @[util.scala:506:17] wire [4:0] out_uop_mem_cmd; // @[util.scala:506:17] wire [1:0] out_uop_mem_size; // @[util.scala:506:17] wire out_uop_mem_signed; // @[util.scala:506:17] wire out_uop_is_fence; // @[util.scala:506:17] wire out_uop_is_fencei; // @[util.scala:506:17] wire out_uop_is_amo; // @[util.scala:506:17] wire out_uop_uses_ldq; // @[util.scala:506:17] wire out_uop_uses_stq; // @[util.scala:506:17] wire out_uop_is_sys_pc2epc; // @[util.scala:506:17] wire out_uop_is_unique; // @[util.scala:506:17] wire out_uop_flush_on_commit; // @[util.scala:506:17] wire out_uop_ldst_is_rs1; // @[util.scala:506:17] wire [5:0] out_uop_ldst; // @[util.scala:506:17] wire [5:0] out_uop_lrs1; // @[util.scala:506:17] wire [5:0] out_uop_lrs2; // @[util.scala:506:17] wire [5:0] out_uop_lrs3; // @[util.scala:506:17] wire out_uop_ldst_val; // @[util.scala:506:17] wire [1:0] out_uop_dst_rtype; // @[util.scala:506:17] wire [1:0] out_uop_lrs1_rtype; // @[util.scala:506:17] wire [1:0] out_uop_lrs2_rtype; // @[util.scala:506:17] wire out_uop_frs3_en; // @[util.scala:506:17] wire out_uop_fp_val; // @[util.scala:506:17] wire out_uop_fp_single; // @[util.scala:506:17] wire out_uop_xcpt_pf_if; // @[util.scala:506:17] wire out_uop_xcpt_ae_if; // @[util.scala:506:17] wire out_uop_xcpt_ma_if; // @[util.scala:506:17] wire out_uop_bp_debug_if; // @[util.scala:506:17] wire out_uop_bp_xcpt_if; // @[util.scala:506:17] wire [1:0] out_uop_debug_fsrc; // @[util.scala:506:17] wire [1:0] out_uop_debug_tsrc; // @[util.scala:506:17] wire [63:0] out_data; // @[util.scala:506:17] wire out_is_hella; // @[util.scala:506:17] wire _io_empty_T_1; // @[util.scala:473:25] wire io_enq_ready_0; // @[util.scala:448:7] wire [3:0] io_deq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7] wire [1:0] 
io_deq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7] wire io_deq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7] wire io_deq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7] wire io_deq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7] wire io_deq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7] wire [6:0] io_deq_bits_uop_uopc_0; // @[util.scala:448:7] wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:448:7] wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_rvc_0; // @[util.scala:448:7] wire [39:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_iq_type_0; // @[util.scala:448:7] wire [9:0] io_deq_bits_uop_fu_code_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_iw_state_0; // @[util.scala:448:7] wire io_deq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7] wire io_deq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_br_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_jalr_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_jal_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_sfb_0; // @[util.scala:448:7] wire [7:0] io_deq_bits_uop_br_mask_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_br_tag_0; // @[util.scala:448:7] wire [3:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:448:7] wire io_deq_bits_uop_edge_inst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:448:7] wire io_deq_bits_uop_taken_0; // @[util.scala:448:7] wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:448:7] wire [11:0] io_deq_bits_uop_csr_addr_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_pdst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_prs1_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_prs2_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_prs3_0; // @[util.scala:448:7] wire [3:0] io_deq_bits_uop_ppred_0; // @[util.scala:448:7] wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:448:7] wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:448:7] wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:448:7] wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:448:7] wire io_deq_bits_uop_exception_0; // @[util.scala:448:7] wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:448:7] wire io_deq_bits_uop_bypassable_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:448:7] wire io_deq_bits_uop_mem_signed_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_fence_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_fencei_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_amo_0; // @[util.scala:448:7] wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:448:7] wire io_deq_bits_uop_uses_stq_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_unique_0; // @[util.scala:448:7] wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:448:7] wire io_deq_bits_uop_ldst_is_rs1_0; // 
@[util.scala:448:7] wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:448:7] wire io_deq_bits_uop_ldst_val_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7] wire io_deq_bits_uop_frs3_en_0; // @[util.scala:448:7] wire io_deq_bits_uop_fp_val_0; // @[util.scala:448:7] wire io_deq_bits_uop_fp_single_0; // @[util.scala:448:7] wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7] wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7] wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7] wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:448:7] wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_debug_tsrc_0; // @[util.scala:448:7] wire [63:0] io_deq_bits_data_0; // @[util.scala:448:7] wire io_deq_bits_is_hella_0; // @[util.scala:448:7] wire io_deq_valid_0; // @[util.scala:448:7] wire io_empty; // @[util.scala:448:7] wire [1:0] io_count; // @[util.scala:448:7] assign out_data = _ram_ext_R0_data[63:0]; // @[util.scala:464:20, :506:17] assign out_is_hella = _ram_ext_R0_data[64]; // @[util.scala:464:20, :506:17] reg valids_0; // @[util.scala:465:24] reg valids_1; // @[util.scala:465:24] reg valids_2; // @[util.scala:465:24] reg valids_3; // @[util.scala:465:24] reg [6:0] uops_0_uopc; // @[util.scala:466:20] reg [31:0] uops_0_inst; // @[util.scala:466:20] reg [31:0] uops_0_debug_inst; // @[util.scala:466:20] reg uops_0_is_rvc; // @[util.scala:466:20] reg [39:0] uops_0_debug_pc; // @[util.scala:466:20] reg [2:0] uops_0_iq_type; // @[util.scala:466:20] reg [9:0] uops_0_fu_code; // @[util.scala:466:20] reg [3:0] uops_0_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_0_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_0_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_0_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_0_ctrl_op_fcn; // @[util.scala:466:20] reg uops_0_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_0_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_0_ctrl_is_load; // @[util.scala:466:20] reg uops_0_ctrl_is_sta; // @[util.scala:466:20] reg uops_0_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_0_iw_state; // @[util.scala:466:20] reg uops_0_iw_p1_poisoned; // @[util.scala:466:20] reg uops_0_iw_p2_poisoned; // @[util.scala:466:20] reg uops_0_is_br; // @[util.scala:466:20] reg uops_0_is_jalr; // @[util.scala:466:20] reg uops_0_is_jal; // @[util.scala:466:20] reg uops_0_is_sfb; // @[util.scala:466:20] reg [7:0] uops_0_br_mask; // @[util.scala:466:20] reg [2:0] uops_0_br_tag; // @[util.scala:466:20] reg [3:0] uops_0_ftq_idx; // @[util.scala:466:20] reg uops_0_edge_inst; // @[util.scala:466:20] reg [5:0] uops_0_pc_lob; // @[util.scala:466:20] reg uops_0_taken; // @[util.scala:466:20] reg [19:0] uops_0_imm_packed; // @[util.scala:466:20] reg [11:0] uops_0_csr_addr; // @[util.scala:466:20] reg [4:0] uops_0_rob_idx; // @[util.scala:466:20] reg [2:0] uops_0_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_0_stq_idx; // @[util.scala:466:20] reg [1:0] uops_0_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_0_pdst; // @[util.scala:466:20] reg [5:0] uops_0_prs1; // @[util.scala:466:20] reg [5:0] uops_0_prs2; // @[util.scala:466:20] reg [5:0] 
uops_0_prs3; // @[util.scala:466:20] reg [3:0] uops_0_ppred; // @[util.scala:466:20] reg uops_0_prs1_busy; // @[util.scala:466:20] reg uops_0_prs2_busy; // @[util.scala:466:20] reg uops_0_prs3_busy; // @[util.scala:466:20] reg uops_0_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_0_stale_pdst; // @[util.scala:466:20] reg uops_0_exception; // @[util.scala:466:20] reg [63:0] uops_0_exc_cause; // @[util.scala:466:20] reg uops_0_bypassable; // @[util.scala:466:20] reg [4:0] uops_0_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_0_mem_size; // @[util.scala:466:20] reg uops_0_mem_signed; // @[util.scala:466:20] reg uops_0_is_fence; // @[util.scala:466:20] reg uops_0_is_fencei; // @[util.scala:466:20] reg uops_0_is_amo; // @[util.scala:466:20] reg uops_0_uses_ldq; // @[util.scala:466:20] reg uops_0_uses_stq; // @[util.scala:466:20] reg uops_0_is_sys_pc2epc; // @[util.scala:466:20] reg uops_0_is_unique; // @[util.scala:466:20] reg uops_0_flush_on_commit; // @[util.scala:466:20] reg uops_0_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_0_ldst; // @[util.scala:466:20] reg [5:0] uops_0_lrs1; // @[util.scala:466:20] reg [5:0] uops_0_lrs2; // @[util.scala:466:20] reg [5:0] uops_0_lrs3; // @[util.scala:466:20] reg uops_0_ldst_val; // @[util.scala:466:20] reg [1:0] uops_0_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_0_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_0_lrs2_rtype; // @[util.scala:466:20] reg uops_0_frs3_en; // @[util.scala:466:20] reg uops_0_fp_val; // @[util.scala:466:20] reg uops_0_fp_single; // @[util.scala:466:20] reg uops_0_xcpt_pf_if; // @[util.scala:466:20] reg uops_0_xcpt_ae_if; // @[util.scala:466:20] reg uops_0_xcpt_ma_if; // @[util.scala:466:20] reg uops_0_bp_debug_if; // @[util.scala:466:20] reg uops_0_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_0_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_0_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_1_uopc; // @[util.scala:466:20] reg [31:0] uops_1_inst; // @[util.scala:466:20] reg [31:0] uops_1_debug_inst; // @[util.scala:466:20] reg uops_1_is_rvc; // @[util.scala:466:20] reg [39:0] uops_1_debug_pc; // @[util.scala:466:20] reg [2:0] uops_1_iq_type; // @[util.scala:466:20] reg [9:0] uops_1_fu_code; // @[util.scala:466:20] reg [3:0] uops_1_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_1_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_1_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_1_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_1_ctrl_op_fcn; // @[util.scala:466:20] reg uops_1_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_1_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_1_ctrl_is_load; // @[util.scala:466:20] reg uops_1_ctrl_is_sta; // @[util.scala:466:20] reg uops_1_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_1_iw_state; // @[util.scala:466:20] reg uops_1_iw_p1_poisoned; // @[util.scala:466:20] reg uops_1_iw_p2_poisoned; // @[util.scala:466:20] reg uops_1_is_br; // @[util.scala:466:20] reg uops_1_is_jalr; // @[util.scala:466:20] reg uops_1_is_jal; // @[util.scala:466:20] reg uops_1_is_sfb; // @[util.scala:466:20] reg [7:0] uops_1_br_mask; // @[util.scala:466:20] reg [2:0] uops_1_br_tag; // @[util.scala:466:20] reg [3:0] uops_1_ftq_idx; // @[util.scala:466:20] reg uops_1_edge_inst; // @[util.scala:466:20] reg [5:0] uops_1_pc_lob; // @[util.scala:466:20] reg uops_1_taken; // @[util.scala:466:20] reg [19:0] uops_1_imm_packed; // @[util.scala:466:20] reg [11:0] uops_1_csr_addr; // @[util.scala:466:20] reg [4:0] uops_1_rob_idx; // @[util.scala:466:20] reg [2:0] 
uops_1_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_1_stq_idx; // @[util.scala:466:20] reg [1:0] uops_1_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_1_pdst; // @[util.scala:466:20] reg [5:0] uops_1_prs1; // @[util.scala:466:20] reg [5:0] uops_1_prs2; // @[util.scala:466:20] reg [5:0] uops_1_prs3; // @[util.scala:466:20] reg [3:0] uops_1_ppred; // @[util.scala:466:20] reg uops_1_prs1_busy; // @[util.scala:466:20] reg uops_1_prs2_busy; // @[util.scala:466:20] reg uops_1_prs3_busy; // @[util.scala:466:20] reg uops_1_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_1_stale_pdst; // @[util.scala:466:20] reg uops_1_exception; // @[util.scala:466:20] reg [63:0] uops_1_exc_cause; // @[util.scala:466:20] reg uops_1_bypassable; // @[util.scala:466:20] reg [4:0] uops_1_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_1_mem_size; // @[util.scala:466:20] reg uops_1_mem_signed; // @[util.scala:466:20] reg uops_1_is_fence; // @[util.scala:466:20] reg uops_1_is_fencei; // @[util.scala:466:20] reg uops_1_is_amo; // @[util.scala:466:20] reg uops_1_uses_ldq; // @[util.scala:466:20] reg uops_1_uses_stq; // @[util.scala:466:20] reg uops_1_is_sys_pc2epc; // @[util.scala:466:20] reg uops_1_is_unique; // @[util.scala:466:20] reg uops_1_flush_on_commit; // @[util.scala:466:20] reg uops_1_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_1_ldst; // @[util.scala:466:20] reg [5:0] uops_1_lrs1; // @[util.scala:466:20] reg [5:0] uops_1_lrs2; // @[util.scala:466:20] reg [5:0] uops_1_lrs3; // @[util.scala:466:20] reg uops_1_ldst_val; // @[util.scala:466:20] reg [1:0] uops_1_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_1_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_1_lrs2_rtype; // @[util.scala:466:20] reg uops_1_frs3_en; // @[util.scala:466:20] reg uops_1_fp_val; // @[util.scala:466:20] reg uops_1_fp_single; // @[util.scala:466:20] reg uops_1_xcpt_pf_if; // @[util.scala:466:20] reg uops_1_xcpt_ae_if; // @[util.scala:466:20] reg uops_1_xcpt_ma_if; // @[util.scala:466:20] reg uops_1_bp_debug_if; // @[util.scala:466:20] reg uops_1_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_1_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_1_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_2_uopc; // @[util.scala:466:20] reg [31:0] uops_2_inst; // @[util.scala:466:20] reg [31:0] uops_2_debug_inst; // @[util.scala:466:20] reg uops_2_is_rvc; // @[util.scala:466:20] reg [39:0] uops_2_debug_pc; // @[util.scala:466:20] reg [2:0] uops_2_iq_type; // @[util.scala:466:20] reg [9:0] uops_2_fu_code; // @[util.scala:466:20] reg [3:0] uops_2_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_2_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_2_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_2_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_2_ctrl_op_fcn; // @[util.scala:466:20] reg uops_2_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_2_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_2_ctrl_is_load; // @[util.scala:466:20] reg uops_2_ctrl_is_sta; // @[util.scala:466:20] reg uops_2_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_2_iw_state; // @[util.scala:466:20] reg uops_2_iw_p1_poisoned; // @[util.scala:466:20] reg uops_2_iw_p2_poisoned; // @[util.scala:466:20] reg uops_2_is_br; // @[util.scala:466:20] reg uops_2_is_jalr; // @[util.scala:466:20] reg uops_2_is_jal; // @[util.scala:466:20] reg uops_2_is_sfb; // @[util.scala:466:20] reg [7:0] uops_2_br_mask; // @[util.scala:466:20] reg [2:0] uops_2_br_tag; // @[util.scala:466:20] reg [3:0] uops_2_ftq_idx; // @[util.scala:466:20] reg 
uops_2_edge_inst; // @[util.scala:466:20] reg [5:0] uops_2_pc_lob; // @[util.scala:466:20] reg uops_2_taken; // @[util.scala:466:20] reg [19:0] uops_2_imm_packed; // @[util.scala:466:20] reg [11:0] uops_2_csr_addr; // @[util.scala:466:20] reg [4:0] uops_2_rob_idx; // @[util.scala:466:20] reg [2:0] uops_2_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_2_stq_idx; // @[util.scala:466:20] reg [1:0] uops_2_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_2_pdst; // @[util.scala:466:20] reg [5:0] uops_2_prs1; // @[util.scala:466:20] reg [5:0] uops_2_prs2; // @[util.scala:466:20] reg [5:0] uops_2_prs3; // @[util.scala:466:20] reg [3:0] uops_2_ppred; // @[util.scala:466:20] reg uops_2_prs1_busy; // @[util.scala:466:20] reg uops_2_prs2_busy; // @[util.scala:466:20] reg uops_2_prs3_busy; // @[util.scala:466:20] reg uops_2_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_2_stale_pdst; // @[util.scala:466:20] reg uops_2_exception; // @[util.scala:466:20] reg [63:0] uops_2_exc_cause; // @[util.scala:466:20] reg uops_2_bypassable; // @[util.scala:466:20] reg [4:0] uops_2_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_2_mem_size; // @[util.scala:466:20] reg uops_2_mem_signed; // @[util.scala:466:20] reg uops_2_is_fence; // @[util.scala:466:20] reg uops_2_is_fencei; // @[util.scala:466:20] reg uops_2_is_amo; // @[util.scala:466:20] reg uops_2_uses_ldq; // @[util.scala:466:20] reg uops_2_uses_stq; // @[util.scala:466:20] reg uops_2_is_sys_pc2epc; // @[util.scala:466:20] reg uops_2_is_unique; // @[util.scala:466:20] reg uops_2_flush_on_commit; // @[util.scala:466:20] reg uops_2_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_2_ldst; // @[util.scala:466:20] reg [5:0] uops_2_lrs1; // @[util.scala:466:20] reg [5:0] uops_2_lrs2; // @[util.scala:466:20] reg [5:0] uops_2_lrs3; // @[util.scala:466:20] reg uops_2_ldst_val; // @[util.scala:466:20] reg [1:0] uops_2_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_2_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_2_lrs2_rtype; // @[util.scala:466:20] reg uops_2_frs3_en; // @[util.scala:466:20] reg uops_2_fp_val; // @[util.scala:466:20] reg uops_2_fp_single; // @[util.scala:466:20] reg uops_2_xcpt_pf_if; // @[util.scala:466:20] reg uops_2_xcpt_ae_if; // @[util.scala:466:20] reg uops_2_xcpt_ma_if; // @[util.scala:466:20] reg uops_2_bp_debug_if; // @[util.scala:466:20] reg uops_2_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_2_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_2_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_3_uopc; // @[util.scala:466:20] reg [31:0] uops_3_inst; // @[util.scala:466:20] reg [31:0] uops_3_debug_inst; // @[util.scala:466:20] reg uops_3_is_rvc; // @[util.scala:466:20] reg [39:0] uops_3_debug_pc; // @[util.scala:466:20] reg [2:0] uops_3_iq_type; // @[util.scala:466:20] reg [9:0] uops_3_fu_code; // @[util.scala:466:20] reg [3:0] uops_3_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_3_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_3_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_3_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_3_ctrl_op_fcn; // @[util.scala:466:20] reg uops_3_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_3_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_3_ctrl_is_load; // @[util.scala:466:20] reg uops_3_ctrl_is_sta; // @[util.scala:466:20] reg uops_3_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_3_iw_state; // @[util.scala:466:20] reg uops_3_iw_p1_poisoned; // @[util.scala:466:20] reg uops_3_iw_p2_poisoned; // @[util.scala:466:20] reg uops_3_is_br; // @[util.scala:466:20] 
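  // valids_0..valids_3 together with the uops_0..uops_3 register banks hold the
  // metadata of a four-entry queue (one bank per slot). Occupancy is tracked by the
  // 2-bit enq_ptr_value/deq_ptr_value counters and the maybe_full flag declared just
  // below, giving:
  //   io_empty = (enq_ptr_value == deq_ptr_value) && !maybe_full
  //   full     = (enq_ptr_value == deq_ptr_value) &&  maybe_full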
reg uops_3_is_jalr; // @[util.scala:466:20] reg uops_3_is_jal; // @[util.scala:466:20] reg uops_3_is_sfb; // @[util.scala:466:20] reg [7:0] uops_3_br_mask; // @[util.scala:466:20] reg [2:0] uops_3_br_tag; // @[util.scala:466:20] reg [3:0] uops_3_ftq_idx; // @[util.scala:466:20] reg uops_3_edge_inst; // @[util.scala:466:20] reg [5:0] uops_3_pc_lob; // @[util.scala:466:20] reg uops_3_taken; // @[util.scala:466:20] reg [19:0] uops_3_imm_packed; // @[util.scala:466:20] reg [11:0] uops_3_csr_addr; // @[util.scala:466:20] reg [4:0] uops_3_rob_idx; // @[util.scala:466:20] reg [2:0] uops_3_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_3_stq_idx; // @[util.scala:466:20] reg [1:0] uops_3_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_3_pdst; // @[util.scala:466:20] reg [5:0] uops_3_prs1; // @[util.scala:466:20] reg [5:0] uops_3_prs2; // @[util.scala:466:20] reg [5:0] uops_3_prs3; // @[util.scala:466:20] reg [3:0] uops_3_ppred; // @[util.scala:466:20] reg uops_3_prs1_busy; // @[util.scala:466:20] reg uops_3_prs2_busy; // @[util.scala:466:20] reg uops_3_prs3_busy; // @[util.scala:466:20] reg uops_3_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_3_stale_pdst; // @[util.scala:466:20] reg uops_3_exception; // @[util.scala:466:20] reg [63:0] uops_3_exc_cause; // @[util.scala:466:20] reg uops_3_bypassable; // @[util.scala:466:20] reg [4:0] uops_3_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_3_mem_size; // @[util.scala:466:20] reg uops_3_mem_signed; // @[util.scala:466:20] reg uops_3_is_fence; // @[util.scala:466:20] reg uops_3_is_fencei; // @[util.scala:466:20] reg uops_3_is_amo; // @[util.scala:466:20] reg uops_3_uses_ldq; // @[util.scala:466:20] reg uops_3_uses_stq; // @[util.scala:466:20] reg uops_3_is_sys_pc2epc; // @[util.scala:466:20] reg uops_3_is_unique; // @[util.scala:466:20] reg uops_3_flush_on_commit; // @[util.scala:466:20] reg uops_3_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_3_ldst; // @[util.scala:466:20] reg [5:0] uops_3_lrs1; // @[util.scala:466:20] reg [5:0] uops_3_lrs2; // @[util.scala:466:20] reg [5:0] uops_3_lrs3; // @[util.scala:466:20] reg uops_3_ldst_val; // @[util.scala:466:20] reg [1:0] uops_3_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_3_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_3_lrs2_rtype; // @[util.scala:466:20] reg uops_3_frs3_en; // @[util.scala:466:20] reg uops_3_fp_val; // @[util.scala:466:20] reg uops_3_fp_single; // @[util.scala:466:20] reg uops_3_xcpt_pf_if; // @[util.scala:466:20] reg uops_3_xcpt_ae_if; // @[util.scala:466:20] reg uops_3_xcpt_ma_if; // @[util.scala:466:20] reg uops_3_bp_debug_if; // @[util.scala:466:20] reg uops_3_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_3_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_3_debug_tsrc; // @[util.scala:466:20] reg [1:0] enq_ptr_value; // @[Counter.scala:61:40] reg [1:0] deq_ptr_value; // @[Counter.scala:61:40] reg maybe_full; // @[util.scala:470:27] wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40] wire _io_empty_T = ~maybe_full; // @[util.scala:470:27, :473:28] assign _io_empty_T_1 = ptr_match & _io_empty_T; // @[util.scala:472:33, :473:{25,28}] assign io_empty = _io_empty_T_1; // @[util.scala:448:7, :473:25] wire _GEN = ptr_match & maybe_full; // @[util.scala:470:27, :472:33, :474:24] wire full; // @[util.scala:474:24] assign full = _GEN; // @[util.scala:474:24] wire _io_count_T; // @[util.scala:526:32] assign _io_count_T = _GEN; // @[util.scala:474:24, :526:32] wire _do_enq_T = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35] wire 
do_enq = _do_enq_T; // @[Decoupled.scala:51:35] wire [3:0] _GEN_0 = {{valids_3}, {valids_2}, {valids_1}, {valids_0}}; // @[util.scala:465:24, :476:42] wire _GEN_1 = _GEN_0[deq_ptr_value]; // @[Counter.scala:61:40] wire _do_deq_T = ~_GEN_1; // @[util.scala:476:42] wire _do_deq_T_1 = io_deq_ready_0 | _do_deq_T; // @[util.scala:448:7, :476:{39,42}] wire _do_deq_T_2 = ~io_empty; // @[util.scala:448:7, :476:69] wire _do_deq_T_3 = _do_deq_T_1 & _do_deq_T_2; // @[util.scala:476:{39,66,69}] wire do_deq = _do_deq_T_3; // @[util.scala:476:{24,66}] wire [7:0] _valids_0_T = io_brupdate_b1_mispredict_mask_0 & uops_0_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_0_T_1 = |_valids_0_T; // @[util.scala:118:{51,59}] wire _valids_0_T_2 = ~_valids_0_T_1; // @[util.scala:118:59, :481:32] wire _valids_0_T_3 = valids_0 & _valids_0_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_0_T_4 = io_flush_0 & uops_0_uses_ldq; // @[util.scala:448:7, :466:20, :481:83] wire _valids_0_T_5 = ~_valids_0_T_4; // @[util.scala:481:{72,83}] wire _valids_0_T_6 = _valids_0_T_3 & _valids_0_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_0_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_0_br_mask_T_1 = uops_0_br_mask & _uops_0_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_1_T = io_brupdate_b1_mispredict_mask_0 & uops_1_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_1_T_1 = |_valids_1_T; // @[util.scala:118:{51,59}] wire _valids_1_T_2 = ~_valids_1_T_1; // @[util.scala:118:59, :481:32] wire _valids_1_T_3 = valids_1 & _valids_1_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_1_T_4 = io_flush_0 & uops_1_uses_ldq; // @[util.scala:448:7, :466:20, :481:83] wire _valids_1_T_5 = ~_valids_1_T_4; // @[util.scala:481:{72,83}] wire _valids_1_T_6 = _valids_1_T_3 & _valids_1_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_1_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_1_br_mask_T_1 = uops_1_br_mask & _uops_1_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_2_T = io_brupdate_b1_mispredict_mask_0 & uops_2_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_2_T_1 = |_valids_2_T; // @[util.scala:118:{51,59}] wire _valids_2_T_2 = ~_valids_2_T_1; // @[util.scala:118:59, :481:32] wire _valids_2_T_3 = valids_2 & _valids_2_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_2_T_4 = io_flush_0 & uops_2_uses_ldq; // @[util.scala:448:7, :466:20, :481:83] wire _valids_2_T_5 = ~_valids_2_T_4; // @[util.scala:481:{72,83}] wire _valids_2_T_6 = _valids_2_T_3 & _valids_2_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_2_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_2_br_mask_T_1 = uops_2_br_mask & _uops_2_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_3_T = io_brupdate_b1_mispredict_mask_0 & uops_3_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_3_T_1 = |_valids_3_T; // @[util.scala:118:{51,59}] wire _valids_3_T_2 = ~_valids_3_T_1; // @[util.scala:118:59, :481:32] wire _valids_3_T_3 = valids_3 & _valids_3_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_3_T_4 = io_flush_0 & uops_3_uses_ldq; // @[util.scala:448:7, :466:20, :481:83] wire _valids_3_T_5 = ~_valids_3_T_4; // @[util.scala:481:{72,83}] wire _valids_3_T_6 = _valids_3_T_3 & _valids_3_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_3_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire 
[7:0] _uops_3_br_mask_T_1 = uops_3_br_mask & _uops_3_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _uops_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:85:27, :89:23, :448:7] wire [7:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask_0 & _uops_br_mask_T; // @[util.scala:85:{25,27}, :448:7] wire wrap = &enq_ptr_value; // @[Counter.scala:61:40, :73:24] wire [2:0] _GEN_2 = {1'h0, enq_ptr_value}; // @[Counter.scala:61:40, :77:24] wire [2:0] _value_T = _GEN_2 + 3'h1; // @[Counter.scala:77:24] wire [1:0] _value_T_1 = _value_T[1:0]; // @[Counter.scala:77:24] wire wrap_1 = &deq_ptr_value; // @[Counter.scala:61:40, :73:24] wire [2:0] _GEN_3 = {1'h0, deq_ptr_value}; // @[Counter.scala:61:40, :77:24] wire [2:0] _value_T_2 = _GEN_3 + 3'h1; // @[Counter.scala:77:24] wire [1:0] _value_T_3 = _value_T_2[1:0]; // @[Counter.scala:77:24] assign _io_enq_ready_T = ~full; // @[util.scala:474:24, :504:19] assign io_enq_ready_0 = _io_enq_ready_T; // @[util.scala:448:7, :504:19] assign io_deq_bits_uop_uopc_0 = out_uop_uopc; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_inst_0 = out_uop_inst; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_debug_inst_0 = out_uop_debug_inst; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_rvc_0 = out_uop_is_rvc; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_debug_pc_0 = out_uop_debug_pc; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_iq_type_0 = out_uop_iq_type; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_fu_code_0 = out_uop_fu_code; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_br_type_0 = out_uop_ctrl_br_type; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_op1_sel_0 = out_uop_ctrl_op1_sel; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_op2_sel_0 = out_uop_ctrl_op2_sel; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_imm_sel_0 = out_uop_ctrl_imm_sel; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_op_fcn_0 = out_uop_ctrl_op_fcn; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_fcn_dw_0 = out_uop_ctrl_fcn_dw; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_csr_cmd_0 = out_uop_ctrl_csr_cmd; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_is_load_0 = out_uop_ctrl_is_load; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_is_sta_0 = out_uop_ctrl_is_sta; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ctrl_is_std_0 = out_uop_ctrl_is_std; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_iw_state_0 = out_uop_iw_state; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_iw_p1_poisoned_0 = out_uop_iw_p1_poisoned; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_iw_p2_poisoned_0 = out_uop_iw_p2_poisoned; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_br_0 = out_uop_is_br; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_jalr_0 = out_uop_is_jalr; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_jal_0 = out_uop_is_jal; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_sfb_0 = out_uop_is_sfb; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_br_tag_0 = out_uop_br_tag; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ftq_idx_0 = out_uop_ftq_idx; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_edge_inst_0 = out_uop_edge_inst; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_pc_lob_0 = out_uop_pc_lob; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_taken_0 = out_uop_taken; // @[util.scala:448:7, :506:17] assign 
io_deq_bits_uop_imm_packed_0 = out_uop_imm_packed; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_csr_addr_0 = out_uop_csr_addr; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_rob_idx_0 = out_uop_rob_idx; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ldq_idx_0 = out_uop_ldq_idx; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_stq_idx_0 = out_uop_stq_idx; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_rxq_idx_0 = out_uop_rxq_idx; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_pdst_0 = out_uop_pdst; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_prs1_0 = out_uop_prs1; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_prs2_0 = out_uop_prs2; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_prs3_0 = out_uop_prs3; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ppred_0 = out_uop_ppred; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_prs1_busy_0 = out_uop_prs1_busy; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_prs2_busy_0 = out_uop_prs2_busy; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_prs3_busy_0 = out_uop_prs3_busy; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ppred_busy_0 = out_uop_ppred_busy; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_stale_pdst_0 = out_uop_stale_pdst; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_exception_0 = out_uop_exception; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_exc_cause_0 = out_uop_exc_cause; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_bypassable_0 = out_uop_bypassable; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_mem_cmd_0 = out_uop_mem_cmd; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_mem_size_0 = out_uop_mem_size; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_mem_signed_0 = out_uop_mem_signed; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_fence_0 = out_uop_is_fence; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_fencei_0 = out_uop_is_fencei; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_amo_0 = out_uop_is_amo; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_uses_ldq_0 = out_uop_uses_ldq; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_uses_stq_0 = out_uop_uses_stq; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_sys_pc2epc_0 = out_uop_is_sys_pc2epc; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_is_unique_0 = out_uop_is_unique; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_flush_on_commit_0 = out_uop_flush_on_commit; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ldst_is_rs1_0 = out_uop_ldst_is_rs1; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ldst_0 = out_uop_ldst; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_lrs1_0 = out_uop_lrs1; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_lrs2_0 = out_uop_lrs2; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_lrs3_0 = out_uop_lrs3; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_ldst_val_0 = out_uop_ldst_val; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_dst_rtype_0 = out_uop_dst_rtype; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_lrs1_rtype_0 = out_uop_lrs1_rtype; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_lrs2_rtype_0 = out_uop_lrs2_rtype; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_frs3_en_0 = out_uop_frs3_en; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_fp_val_0 = out_uop_fp_val; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_fp_single_0 = 
out_uop_fp_single; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_xcpt_pf_if_0 = out_uop_xcpt_pf_if; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_xcpt_ae_if_0 = out_uop_xcpt_ae_if; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_xcpt_ma_if_0 = out_uop_xcpt_ma_if; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_bp_debug_if_0 = out_uop_bp_debug_if; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_bp_xcpt_if_0 = out_uop_bp_xcpt_if; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_debug_fsrc_0 = out_uop_debug_fsrc; // @[util.scala:448:7, :506:17] assign io_deq_bits_uop_debug_tsrc_0 = out_uop_debug_tsrc; // @[util.scala:448:7, :506:17] assign io_deq_bits_data_0 = out_data; // @[util.scala:448:7, :506:17] assign io_deq_bits_is_hella_0 = out_is_hella; // @[util.scala:448:7, :506:17] wire [7:0] out_uop_br_mask; // @[util.scala:506:17] wire [3:0][6:0] _GEN_4 = {{uops_3_uopc}, {uops_2_uopc}, {uops_1_uopc}, {uops_0_uopc}}; // @[util.scala:466:20, :508:19] assign out_uop_uopc = _GEN_4[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][31:0] _GEN_5 = {{uops_3_inst}, {uops_2_inst}, {uops_1_inst}, {uops_0_inst}}; // @[util.scala:466:20, :508:19] assign out_uop_inst = _GEN_5[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][31:0] _GEN_6 = {{uops_3_debug_inst}, {uops_2_debug_inst}, {uops_1_debug_inst}, {uops_0_debug_inst}}; // @[util.scala:466:20, :508:19] assign out_uop_debug_inst = _GEN_6[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_7 = {{uops_3_is_rvc}, {uops_2_is_rvc}, {uops_1_is_rvc}, {uops_0_is_rvc}}; // @[util.scala:466:20, :508:19] assign out_uop_is_rvc = _GEN_7[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][39:0] _GEN_8 = {{uops_3_debug_pc}, {uops_2_debug_pc}, {uops_1_debug_pc}, {uops_0_debug_pc}}; // @[util.scala:466:20, :508:19] assign out_uop_debug_pc = _GEN_8[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][2:0] _GEN_9 = {{uops_3_iq_type}, {uops_2_iq_type}, {uops_1_iq_type}, {uops_0_iq_type}}; // @[util.scala:466:20, :508:19] assign out_uop_iq_type = _GEN_9[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][9:0] _GEN_10 = {{uops_3_fu_code}, {uops_2_fu_code}, {uops_1_fu_code}, {uops_0_fu_code}}; // @[util.scala:466:20, :508:19] assign out_uop_fu_code = _GEN_10[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][3:0] _GEN_11 = {{uops_3_ctrl_br_type}, {uops_2_ctrl_br_type}, {uops_1_ctrl_br_type}, {uops_0_ctrl_br_type}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_br_type = _GEN_11[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_12 = {{uops_3_ctrl_op1_sel}, {uops_2_ctrl_op1_sel}, {uops_1_ctrl_op1_sel}, {uops_0_ctrl_op1_sel}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_op1_sel = _GEN_12[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][2:0] _GEN_13 = {{uops_3_ctrl_op2_sel}, {uops_2_ctrl_op2_sel}, {uops_1_ctrl_op2_sel}, {uops_0_ctrl_op2_sel}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_op2_sel = _GEN_13[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][2:0] _GEN_14 = {{uops_3_ctrl_imm_sel}, {uops_2_ctrl_imm_sel}, {uops_1_ctrl_imm_sel}, {uops_0_ctrl_imm_sel}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_imm_sel = _GEN_14[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][4:0] _GEN_15 = {{uops_3_ctrl_op_fcn}, {uops_2_ctrl_op_fcn}, {uops_1_ctrl_op_fcn}, {uops_0_ctrl_op_fcn}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_op_fcn = _GEN_15[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_16 = {{uops_3_ctrl_fcn_dw}, {uops_2_ctrl_fcn_dw}, 
{uops_1_ctrl_fcn_dw}, {uops_0_ctrl_fcn_dw}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_fcn_dw = _GEN_16[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][2:0] _GEN_17 = {{uops_3_ctrl_csr_cmd}, {uops_2_ctrl_csr_cmd}, {uops_1_ctrl_csr_cmd}, {uops_0_ctrl_csr_cmd}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_csr_cmd = _GEN_17[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_18 = {{uops_3_ctrl_is_load}, {uops_2_ctrl_is_load}, {uops_1_ctrl_is_load}, {uops_0_ctrl_is_load}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_is_load = _GEN_18[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_19 = {{uops_3_ctrl_is_sta}, {uops_2_ctrl_is_sta}, {uops_1_ctrl_is_sta}, {uops_0_ctrl_is_sta}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_is_sta = _GEN_19[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_20 = {{uops_3_ctrl_is_std}, {uops_2_ctrl_is_std}, {uops_1_ctrl_is_std}, {uops_0_ctrl_is_std}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_is_std = _GEN_20[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_21 = {{uops_3_iw_state}, {uops_2_iw_state}, {uops_1_iw_state}, {uops_0_iw_state}}; // @[util.scala:466:20, :508:19] assign out_uop_iw_state = _GEN_21[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_22 = {{uops_3_iw_p1_poisoned}, {uops_2_iw_p1_poisoned}, {uops_1_iw_p1_poisoned}, {uops_0_iw_p1_poisoned}}; // @[util.scala:466:20, :508:19] assign out_uop_iw_p1_poisoned = _GEN_22[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_23 = {{uops_3_iw_p2_poisoned}, {uops_2_iw_p2_poisoned}, {uops_1_iw_p2_poisoned}, {uops_0_iw_p2_poisoned}}; // @[util.scala:466:20, :508:19] assign out_uop_iw_p2_poisoned = _GEN_23[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_24 = {{uops_3_is_br}, {uops_2_is_br}, {uops_1_is_br}, {uops_0_is_br}}; // @[util.scala:466:20, :508:19] assign out_uop_is_br = _GEN_24[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_25 = {{uops_3_is_jalr}, {uops_2_is_jalr}, {uops_1_is_jalr}, {uops_0_is_jalr}}; // @[util.scala:466:20, :508:19] assign out_uop_is_jalr = _GEN_25[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_26 = {{uops_3_is_jal}, {uops_2_is_jal}, {uops_1_is_jal}, {uops_0_is_jal}}; // @[util.scala:466:20, :508:19] assign out_uop_is_jal = _GEN_26[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_27 = {{uops_3_is_sfb}, {uops_2_is_sfb}, {uops_1_is_sfb}, {uops_0_is_sfb}}; // @[util.scala:466:20, :508:19] assign out_uop_is_sfb = _GEN_27[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][7:0] _GEN_28 = {{uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}}; // @[util.scala:466:20, :508:19] assign out_uop_br_mask = _GEN_28[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][2:0] _GEN_29 = {{uops_3_br_tag}, {uops_2_br_tag}, {uops_1_br_tag}, {uops_0_br_tag}}; // @[util.scala:466:20, :508:19] assign out_uop_br_tag = _GEN_29[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][3:0] _GEN_30 = {{uops_3_ftq_idx}, {uops_2_ftq_idx}, {uops_1_ftq_idx}, {uops_0_ftq_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_ftq_idx = _GEN_30[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_31 = {{uops_3_edge_inst}, {uops_2_edge_inst}, {uops_1_edge_inst}, {uops_0_edge_inst}}; // @[util.scala:466:20, :508:19] assign out_uop_edge_inst = _GEN_31[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_32 = {{uops_3_pc_lob}, {uops_2_pc_lob}, {uops_1_pc_lob}, {uops_0_pc_lob}}; // @[util.scala:466:20, :508:19] assign out_uop_pc_lob = _GEN_32[deq_ptr_value]; 
// @[Counter.scala:61:40] wire [3:0] _GEN_33 = {{uops_3_taken}, {uops_2_taken}, {uops_1_taken}, {uops_0_taken}}; // @[util.scala:466:20, :508:19] assign out_uop_taken = _GEN_33[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][19:0] _GEN_34 = {{uops_3_imm_packed}, {uops_2_imm_packed}, {uops_1_imm_packed}, {uops_0_imm_packed}}; // @[util.scala:466:20, :508:19] assign out_uop_imm_packed = _GEN_34[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][11:0] _GEN_35 = {{uops_3_csr_addr}, {uops_2_csr_addr}, {uops_1_csr_addr}, {uops_0_csr_addr}}; // @[util.scala:466:20, :508:19] assign out_uop_csr_addr = _GEN_35[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][4:0] _GEN_36 = {{uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_rob_idx = _GEN_36[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][2:0] _GEN_37 = {{uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_ldq_idx = _GEN_37[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][2:0] _GEN_38 = {{uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_stq_idx = _GEN_38[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_39 = {{uops_3_rxq_idx}, {uops_2_rxq_idx}, {uops_1_rxq_idx}, {uops_0_rxq_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_rxq_idx = _GEN_39[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_40 = {{uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}}; // @[util.scala:466:20, :508:19] assign out_uop_pdst = _GEN_40[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_41 = {{uops_3_prs1}, {uops_2_prs1}, {uops_1_prs1}, {uops_0_prs1}}; // @[util.scala:466:20, :508:19] assign out_uop_prs1 = _GEN_41[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_42 = {{uops_3_prs2}, {uops_2_prs2}, {uops_1_prs2}, {uops_0_prs2}}; // @[util.scala:466:20, :508:19] assign out_uop_prs2 = _GEN_42[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_43 = {{uops_3_prs3}, {uops_2_prs3}, {uops_1_prs3}, {uops_0_prs3}}; // @[util.scala:466:20, :508:19] assign out_uop_prs3 = _GEN_43[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][3:0] _GEN_44 = {{uops_3_ppred}, {uops_2_ppred}, {uops_1_ppred}, {uops_0_ppred}}; // @[util.scala:466:20, :508:19] assign out_uop_ppred = _GEN_44[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_45 = {{uops_3_prs1_busy}, {uops_2_prs1_busy}, {uops_1_prs1_busy}, {uops_0_prs1_busy}}; // @[util.scala:466:20, :508:19] assign out_uop_prs1_busy = _GEN_45[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_46 = {{uops_3_prs2_busy}, {uops_2_prs2_busy}, {uops_1_prs2_busy}, {uops_0_prs2_busy}}; // @[util.scala:466:20, :508:19] assign out_uop_prs2_busy = _GEN_46[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_47 = {{uops_3_prs3_busy}, {uops_2_prs3_busy}, {uops_1_prs3_busy}, {uops_0_prs3_busy}}; // @[util.scala:466:20, :508:19] assign out_uop_prs3_busy = _GEN_47[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_48 = {{uops_3_ppred_busy}, {uops_2_ppred_busy}, {uops_1_ppred_busy}, {uops_0_ppred_busy}}; // @[util.scala:466:20, :508:19] assign out_uop_ppred_busy = _GEN_48[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_49 = {{uops_3_stale_pdst}, {uops_2_stale_pdst}, {uops_1_stale_pdst}, {uops_0_stale_pdst}}; // @[util.scala:466:20, :508:19] assign out_uop_stale_pdst = _GEN_49[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] 
_GEN_50 = {{uops_3_exception}, {uops_2_exception}, {uops_1_exception}, {uops_0_exception}}; // @[util.scala:466:20, :508:19] assign out_uop_exception = _GEN_50[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][63:0] _GEN_51 = {{uops_3_exc_cause}, {uops_2_exc_cause}, {uops_1_exc_cause}, {uops_0_exc_cause}}; // @[util.scala:466:20, :508:19] assign out_uop_exc_cause = _GEN_51[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_52 = {{uops_3_bypassable}, {uops_2_bypassable}, {uops_1_bypassable}, {uops_0_bypassable}}; // @[util.scala:466:20, :508:19] assign out_uop_bypassable = _GEN_52[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][4:0] _GEN_53 = {{uops_3_mem_cmd}, {uops_2_mem_cmd}, {uops_1_mem_cmd}, {uops_0_mem_cmd}}; // @[util.scala:466:20, :508:19] assign out_uop_mem_cmd = _GEN_53[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_54 = {{uops_3_mem_size}, {uops_2_mem_size}, {uops_1_mem_size}, {uops_0_mem_size}}; // @[util.scala:466:20, :508:19] assign out_uop_mem_size = _GEN_54[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_55 = {{uops_3_mem_signed}, {uops_2_mem_signed}, {uops_1_mem_signed}, {uops_0_mem_signed}}; // @[util.scala:466:20, :508:19] assign out_uop_mem_signed = _GEN_55[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_56 = {{uops_3_is_fence}, {uops_2_is_fence}, {uops_1_is_fence}, {uops_0_is_fence}}; // @[util.scala:466:20, :508:19] assign out_uop_is_fence = _GEN_56[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_57 = {{uops_3_is_fencei}, {uops_2_is_fencei}, {uops_1_is_fencei}, {uops_0_is_fencei}}; // @[util.scala:466:20, :508:19] assign out_uop_is_fencei = _GEN_57[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_58 = {{uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}}; // @[util.scala:466:20, :508:19] assign out_uop_is_amo = _GEN_58[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_59 = {{uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}}; // @[util.scala:466:20, :508:19] assign out_uop_uses_ldq = _GEN_59[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_60 = {{uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}}; // @[util.scala:466:20, :508:19] assign out_uop_uses_stq = _GEN_60[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_61 = {{uops_3_is_sys_pc2epc}, {uops_2_is_sys_pc2epc}, {uops_1_is_sys_pc2epc}, {uops_0_is_sys_pc2epc}}; // @[util.scala:466:20, :508:19] assign out_uop_is_sys_pc2epc = _GEN_61[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_62 = {{uops_3_is_unique}, {uops_2_is_unique}, {uops_1_is_unique}, {uops_0_is_unique}}; // @[util.scala:466:20, :508:19] assign out_uop_is_unique = _GEN_62[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_63 = {{uops_3_flush_on_commit}, {uops_2_flush_on_commit}, {uops_1_flush_on_commit}, {uops_0_flush_on_commit}}; // @[util.scala:466:20, :508:19] assign out_uop_flush_on_commit = _GEN_63[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_64 = {{uops_3_ldst_is_rs1}, {uops_2_ldst_is_rs1}, {uops_1_ldst_is_rs1}, {uops_0_ldst_is_rs1}}; // @[util.scala:466:20, :508:19] assign out_uop_ldst_is_rs1 = _GEN_64[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_65 = {{uops_3_ldst}, {uops_2_ldst}, {uops_1_ldst}, {uops_0_ldst}}; // @[util.scala:466:20, :508:19] assign out_uop_ldst = _GEN_65[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_66 = {{uops_3_lrs1}, {uops_2_lrs1}, {uops_1_lrs1}, {uops_0_lrs1}}; // @[util.scala:466:20, :508:19] assign 
out_uop_lrs1 = _GEN_66[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_67 = {{uops_3_lrs2}, {uops_2_lrs2}, {uops_1_lrs2}, {uops_0_lrs2}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs2 = _GEN_67[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][5:0] _GEN_68 = {{uops_3_lrs3}, {uops_2_lrs3}, {uops_1_lrs3}, {uops_0_lrs3}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs3 = _GEN_68[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_69 = {{uops_3_ldst_val}, {uops_2_ldst_val}, {uops_1_ldst_val}, {uops_0_ldst_val}}; // @[util.scala:466:20, :508:19] assign out_uop_ldst_val = _GEN_69[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_70 = {{uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}}; // @[util.scala:466:20, :508:19] assign out_uop_dst_rtype = _GEN_70[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_71 = {{uops_3_lrs1_rtype}, {uops_2_lrs1_rtype}, {uops_1_lrs1_rtype}, {uops_0_lrs1_rtype}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs1_rtype = _GEN_71[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_72 = {{uops_3_lrs2_rtype}, {uops_2_lrs2_rtype}, {uops_1_lrs2_rtype}, {uops_0_lrs2_rtype}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs2_rtype = _GEN_72[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_73 = {{uops_3_frs3_en}, {uops_2_frs3_en}, {uops_1_frs3_en}, {uops_0_frs3_en}}; // @[util.scala:466:20, :508:19] assign out_uop_frs3_en = _GEN_73[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_74 = {{uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}}; // @[util.scala:466:20, :508:19] assign out_uop_fp_val = _GEN_74[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_75 = {{uops_3_fp_single}, {uops_2_fp_single}, {uops_1_fp_single}, {uops_0_fp_single}}; // @[util.scala:466:20, :508:19] assign out_uop_fp_single = _GEN_75[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_76 = {{uops_3_xcpt_pf_if}, {uops_2_xcpt_pf_if}, {uops_1_xcpt_pf_if}, {uops_0_xcpt_pf_if}}; // @[util.scala:466:20, :508:19] assign out_uop_xcpt_pf_if = _GEN_76[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_77 = {{uops_3_xcpt_ae_if}, {uops_2_xcpt_ae_if}, {uops_1_xcpt_ae_if}, {uops_0_xcpt_ae_if}}; // @[util.scala:466:20, :508:19] assign out_uop_xcpt_ae_if = _GEN_77[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_78 = {{uops_3_xcpt_ma_if}, {uops_2_xcpt_ma_if}, {uops_1_xcpt_ma_if}, {uops_0_xcpt_ma_if}}; // @[util.scala:466:20, :508:19] assign out_uop_xcpt_ma_if = _GEN_78[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_79 = {{uops_3_bp_debug_if}, {uops_2_bp_debug_if}, {uops_1_bp_debug_if}, {uops_0_bp_debug_if}}; // @[util.scala:466:20, :508:19] assign out_uop_bp_debug_if = _GEN_79[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0] _GEN_80 = {{uops_3_bp_xcpt_if}, {uops_2_bp_xcpt_if}, {uops_1_bp_xcpt_if}, {uops_0_bp_xcpt_if}}; // @[util.scala:466:20, :508:19] assign out_uop_bp_xcpt_if = _GEN_80[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_81 = {{uops_3_debug_fsrc}, {uops_2_debug_fsrc}, {uops_1_debug_fsrc}, {uops_0_debug_fsrc}}; // @[util.scala:466:20, :508:19] assign out_uop_debug_fsrc = _GEN_81[deq_ptr_value]; // @[Counter.scala:61:40] wire [3:0][1:0] _GEN_82 = {{uops_3_debug_tsrc}, {uops_2_debug_tsrc}, {uops_1_debug_tsrc}, {uops_0_debug_tsrc}}; // @[util.scala:466:20, :508:19] assign out_uop_debug_tsrc = _GEN_82[deq_ptr_value]; // @[Counter.scala:61:40] wire _io_deq_valid_T = ~io_empty; // @[util.scala:448:7, :476:69, :509:30] 
wire _io_deq_valid_T_1 = _io_deq_valid_T & _GEN_1; // @[util.scala:476:42, :509:{30,40}] wire [7:0] _io_deq_valid_T_2 = io_brupdate_b1_mispredict_mask_0 & out_uop_br_mask; // @[util.scala:118:51, :448:7, :506:17] wire _io_deq_valid_T_3 = |_io_deq_valid_T_2; // @[util.scala:118:{51,59}] wire _io_deq_valid_T_4 = ~_io_deq_valid_T_3; // @[util.scala:118:59, :509:68] wire _io_deq_valid_T_5 = _io_deq_valid_T_1 & _io_deq_valid_T_4; // @[util.scala:509:{40,65,68}] wire _io_deq_valid_T_6 = io_flush_0 & out_uop_uses_ldq; // @[util.scala:448:7, :506:17, :509:122] wire _io_deq_valid_T_7 = ~_io_deq_valid_T_6; // @[util.scala:509:{111,122}] assign _io_deq_valid_T_8 = _io_deq_valid_T_5 & _io_deq_valid_T_7; // @[util.scala:509:{65,108,111}] assign io_deq_valid_0 = _io_deq_valid_T_8; // @[util.scala:448:7, :509:108] wire [7:0] _io_deq_bits_uop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:85:27, :89:23, :448:7] assign _io_deq_bits_uop_br_mask_T_1 = out_uop_br_mask & _io_deq_bits_uop_br_mask_T; // @[util.scala:85:{25,27}, :506:17] assign io_deq_bits_uop_br_mask_0 = _io_deq_bits_uop_br_mask_T_1; // @[util.scala:85:25, :448:7] wire [2:0] _ptr_diff_T = _GEN_2 - _GEN_3; // @[Counter.scala:77:24] wire [1:0] ptr_diff = _ptr_diff_T[1:0]; // @[util.scala:524:40] wire [2:0] _io_count_T_1 = {_io_count_T, ptr_diff}; // @[util.scala:524:40, :526:{20,32}] assign io_count = _io_count_T_1[1:0]; // @[util.scala:448:7, :526:{14,20}] wire _GEN_83 = enq_ptr_value == 2'h0; // @[Counter.scala:61:40] wire _GEN_84 = do_enq & _GEN_83; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_85 = enq_ptr_value == 2'h1; // @[Counter.scala:61:40] wire _GEN_86 = do_enq & _GEN_85; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_87 = enq_ptr_value == 2'h2; // @[Counter.scala:61:40] wire _GEN_88 = do_enq & _GEN_87; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_89 = do_enq & (&enq_ptr_value); // @[Counter.scala:61:40] always @(posedge clock) begin // @[util.scala:448:7] if (reset) begin // @[util.scala:448:7] valids_0 <= 1'h0; // @[util.scala:465:24] valids_1 <= 1'h0; // @[util.scala:465:24] valids_2 <= 1'h0; // @[util.scala:465:24] valids_3 <= 1'h0; // @[util.scala:465:24] enq_ptr_value <= 2'h0; // @[Counter.scala:61:40] deq_ptr_value <= 2'h0; // @[Counter.scala:61:40] maybe_full <= 1'h0; // @[util.scala:470:27] end else begin // @[util.scala:448:7] valids_0 <= ~(do_deq & deq_ptr_value == 2'h0) & (_GEN_84 | _valids_0_T_6); // @[Counter.scala:61:40] valids_1 <= ~(do_deq & deq_ptr_value == 2'h1) & (_GEN_86 | _valids_1_T_6); // @[Counter.scala:61:40] valids_2 <= ~(do_deq & deq_ptr_value == 2'h2) & (_GEN_88 | _valids_2_T_6); // @[Counter.scala:61:40] valids_3 <= ~(do_deq & (&deq_ptr_value)) & (_GEN_89 | _valids_3_T_6); // @[Counter.scala:61:40] if (do_enq) // @[util.scala:475:24] enq_ptr_value <= _value_T_1; // @[Counter.scala:61:40, :77:24] if (do_deq) // @[util.scala:476:24] deq_ptr_value <= _value_T_3; // @[Counter.scala:61:40, :77:24] if (~(do_enq == do_deq)) // @[util.scala:470:27, :475:24, :476:24, :500:{16,28}, :501:16] maybe_full <= do_enq; // @[util.scala:470:27, :475:24] end if (_GEN_84) begin // @[util.scala:481:16, :487:17, :489:33] uops_0_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_0_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_0_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_0_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_0_debug_pc <= io_enq_bits_uop_debug_pc_0; // 
@[util.scala:448:7, :466:20] uops_0_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_0_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_0_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_0_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_0_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_0_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_0_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_0_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_0_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_0_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_0_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_0_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_0_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_0_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_0_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_0_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_0_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_0_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_0_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_0_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_0_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_0_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_0_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_0_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_0_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_0_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_0_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_0_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_0_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_0_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_0_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_0_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_0_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] 
uops_0_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_0_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_0_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_0_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_0_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_0_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_0_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_0_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_0_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_0_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_0_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_0_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_0_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_0_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_0_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_0_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_0_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_0_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_0_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_0_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_0_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_0_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_0_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_0_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_0_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_0_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_0_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_83) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_0_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_0) // @[util.scala:465:24] uops_0_br_mask <= _uops_0_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_86) begin // @[util.scala:481:16, :487:17, :489:33] uops_1_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_1_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_1_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_1_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_1_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_1_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_1_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] 
uops_1_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_1_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_1_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_1_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_1_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_1_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_1_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_1_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_1_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_1_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_1_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_1_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_1_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_1_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_1_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_1_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_1_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_1_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_1_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_1_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_1_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_1_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_1_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_1_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_1_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_1_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_1_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_1_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_1_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_1_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_1_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_1_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_1_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_1_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_1_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_1_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_1_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_1_uses_stq <= io_enq_bits_uop_uses_stq_0; // 
@[util.scala:448:7, :466:20] uops_1_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_1_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_1_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_1_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_1_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_1_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_1_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_1_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_1_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_1_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_1_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_1_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_1_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_1_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_1_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_1_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_1_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_1_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_1_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_1_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_1_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_85) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_1_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_1) // @[util.scala:465:24] uops_1_br_mask <= _uops_1_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_88) begin // @[util.scala:481:16, :487:17, :489:33] uops_2_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_2_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_2_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_2_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_2_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_2_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_2_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_2_iw_state <= 
io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_2_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_2_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_2_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_2_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_2_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_2_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_2_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_2_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_2_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_2_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_2_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_2_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_2_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_2_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_2_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_2_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_2_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_2_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_2_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_2_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_2_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_2_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_2_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_2_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_2_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_2_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_2_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_2_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_2_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_2_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_2_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_2_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_2_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_2_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_2_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_2_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_2_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_2_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_2_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_2_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_2_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_2_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_2_lrs2 <= 
io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_2_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_2_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_2_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_2_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_2_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_2_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_2_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_2_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_2_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_2_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_2_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_2_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_2_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_2_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_87) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_2_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_2) // @[util.scala:465:24] uops_2_br_mask <= _uops_2_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_89) begin // @[util.scala:481:16, :487:17, :489:33] uops_3_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_3_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_3_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_3_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_3_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_3_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_3_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_3_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_3_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_3_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_3_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_3_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_3_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_3_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] 
uops_3_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_3_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_3_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_3_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_3_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_3_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_3_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_3_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_3_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_3_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_3_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_3_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_3_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_3_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_3_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_3_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_3_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_3_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_3_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_3_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_3_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_3_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_3_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_3_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_3_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_3_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_3_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_3_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_3_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_3_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_3_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_3_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_3_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_3_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_3_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_3_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_3_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_3_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_3_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_3_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_3_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_3_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_3_fp_val <= 
io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_3_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_3_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_3_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_3_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_3_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_3_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_3_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_3_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & (&enq_ptr_value)) // @[Counter.scala:61:40] uops_3_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_3) // @[util.scala:465:24] uops_3_br_mask <= _uops_3_br_mask_T_1; // @[util.scala:89:21, :466:20] end // always @(posedge) ram_4x65 ram_ext ( // @[util.scala:464:20] .R0_addr (deq_ptr_value), // @[Counter.scala:61:40] .R0_en (1'h1), .R0_clk (clock), .R0_data (_ram_ext_R0_data), .W0_addr (enq_ptr_value), // @[Counter.scala:61:40] .W0_en (do_enq), // @[util.scala:475:24] .W0_clk (clock), .W0_data ({io_enq_bits_is_hella_0, io_enq_bits_data_0}) // @[util.scala:448:7, :464:20] ); // @[util.scala:464:20] assign io_enq_ready = io_enq_ready_0; // @[util.scala:448:7] assign io_deq_valid = io_deq_valid_0; // @[util.scala:448:7] assign io_deq_bits_uop_uopc = io_deq_bits_uop_uopc_0; // @[util.scala:448:7] assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:448:7] assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:448:7] assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:448:7] assign io_deq_bits_uop_iq_type = io_deq_bits_uop_iq_type_0; // @[util.scala:448:7] assign io_deq_bits_uop_fu_code = io_deq_bits_uop_fu_code_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_br_type = io_deq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_op1_sel = io_deq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_op2_sel = io_deq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_imm_sel = io_deq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_op_fcn = io_deq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_fcn_dw = io_deq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_csr_cmd = io_deq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_is_load = io_deq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_is_sta = io_deq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_is_std = io_deq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7] assign io_deq_bits_uop_iw_state = io_deq_bits_uop_iw_state_0; // @[util.scala:448:7] assign io_deq_bits_uop_iw_p1_poisoned = io_deq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7] assign io_deq_bits_uop_iw_p2_poisoned = io_deq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_br = io_deq_bits_uop_is_br_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_jalr = io_deq_bits_uop_is_jalr_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_jal = io_deq_bits_uop_is_jal_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_sfb = 
io_deq_bits_uop_is_sfb_0; // @[util.scala:448:7] assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:448:7] assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:448:7] assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:448:7] assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:448:7] assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:448:7] assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:448:7] assign io_deq_bits_uop_csr_addr = io_deq_bits_uop_csr_addr_0; // @[util.scala:448:7] assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:448:7] assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:448:7] assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:448:7] assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:448:7] assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:448:7] assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:448:7] assign io_deq_bits_uop_bypassable = io_deq_bits_uop_bypassable_0; // @[util.scala:448:7] assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:448:7] assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:448:7] assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:448:7] assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:448:7] assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:448:7] assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:448:7] assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7] assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:448:7] assign io_deq_bits_uop_ldst_val = 
io_deq_bits_uop_ldst_val_0; // @[util.scala:448:7] assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7] assign io_deq_bits_uop_frs3_en = io_deq_bits_uop_frs3_en_0; // @[util.scala:448:7] assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:448:7] assign io_deq_bits_uop_fp_single = io_deq_bits_uop_fp_single_0; // @[util.scala:448:7] assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:448:7] assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:448:7] assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:448:7] assign io_deq_bits_is_hella = io_deq_bits_is_hella_0; // @[util.scala:448:7] endmodule
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. 
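Before the Nodes.scala source begins, the prefix-OR helpers defined at the end of package.scala above (leftOR and rightOR) are easiest to see with a tiny worked example. The sketch below is illustrative only and is not part of package.scala or Nodes.scala; the module name LeftOrExample is hypothetical.

import chisel3._
import freechips.rocketchip.util._

// Hypothetical example module, not from the rocket-chip sources.
class LeftOrExample extends Module {
  val io = IO(new Bundle {
    val in    = Input(UInt(8.W))
    val left  = Output(UInt(8.W))
    val right = Output(UInt(8.W))
  })
  // leftOR fills 1s upward from every set bit:  0b0001_0100 -> 0b1111_1100
  io.left  := leftOR(io.in)
  // rightOR fills 1s downward from every set bit: 0b0001_0100 -> 0b0001_1111
  io.right := rightOR(io.in)
}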
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
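The node classes above (TLClientNode, TLManagerNode, TLAdapterNode, the identity/ephemeral variants and their async, rational, and credited counterparts) are normally declared inside a LazyModule and only turned into hardware in its imp. The following sketch is illustrative only and is not part of Nodes.scala; ExamplePassthrough is a hypothetical name, the import paths follow the packages shown in this document, and the body simply forwards every negotiated TileLink edge unchanged.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp}
import freechips.rocketchip.tilelink._

// Hypothetical adapter, not from the rocket-chip sources: client and manager
// parameters pass through untouched, and each inward bundle is bulk-connected
// to the matching outward bundle.
class ExamplePassthrough(implicit p: Parameters) extends LazyModule {
  val node = TLAdapterNode()
  lazy val module = new LazyModuleImp(this) {
    (node.in zip node.out).foreach { case ((in, _), (out, _)) =>
      out <> in
    }
  }
}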
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. 
* Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode 
= UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new 
TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
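// Note on the traversal below: a child carrying a cloneProto is stamped out with
// CloneModuleAsRecord (reusing the prototype's already-elaborated module) and its empty
// Dangles are re-pointed at the cloned AutoBundle, while every other child is
// instantiated normally with Module(c.module) and contributes its own dangles.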
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate 
these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
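// Illustrative sketch (not part of Parameters.scala): a worked example of the base/mask
// matching implemented by the AddressSet case class defined just below.
//   val s = AddressSet(0x1000, 0xf0f)
//   s.contains(BigInt(0x1104))   // true:  (0x1104 ^ 0x1000) & ~0xf0f == 0, i.e. inside 0x1100-0x110f
//   s.contains(BigInt(0x1010))   // false: bit 4 lies outside the mask, so no granule contains 0x1010
//   s.alignment                  // 0x10: the low mask bits select 16-byte granules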
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. 
* * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. 
For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. 
*/ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. 
*/ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? 
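// These two flags let hasData() constant-fold at elaboration time: for example,
// on an edge whose managers only support Get, every D message is an
// AccessAckData, so staticHasData returns Some(true) and the opcode-based decode
// in hasData() is dropped entirely.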
val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x 
match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! 
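// Note: inFlight below keeps a running count of outstanding transactions, adding
// one on the first beat of every request that fires and subtracting one on the
// last beat of every response; B, C and E only participate when the edge
// supports Acquire/Probe (bce). It returns both the registered count and its
// next-cycle value, and the cost warned about above comes from the per-channel
// firstlast trackers and PopCounts it instantiates.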
def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare 
c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, 
mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := 
DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: 
Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, 
data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } } File Arbiter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ object TLArbiter { // (valids, select) => readys type Policy = (Integer, UInt, Bool) => UInt val lowestIndexFirst: Policy = (width, valids, select) => ~(leftOR(valids) << 1)(width-1, 0) val highestIndexFirst: Policy = (width, valids, select) => ~((rightOR(valids) >> 1).pad(width)) val roundRobin: Policy = (width, valids, select) => if (width == 1) 1.U(1.W) else { val valid = valids(width-1, 0) assert (valid === valids) val mask = RegInit(((BigInt(1) << width)-1).U(width-1,0)) val filter = Cat(valid & ~mask, valid) val unready = (rightOR(filter, width*2, width) >> 1) | (mask << width) val readys = ~((unready >> width) & unready(width-1, 0)) when (select && valid.orR) { mask := leftOR(readys & valid, width) } readys(width-1, 0) } def lowestFromSeq[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: Seq[DecoupledIO[T]]): Unit = { apply(lowestIndexFirst)(sink, sources.map(s => (edge.numBeats1(s.bits), s)):_*) } def lowest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(lowestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def highest[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(highestIndexFirst)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def robin[T <: TLChannel](edge: TLEdge, sink: DecoupledIO[T], sources: DecoupledIO[T]*): Unit = { apply(roundRobin)(sink, sources.toList.map(s => (edge.numBeats1(s.bits), s)):_*) } def apply[T <: Data](policy: Policy)(sink: DecoupledIO[T], sources: (UInt, DecoupledIO[T])*): Unit = { if (sources.isEmpty) { sink.bits := DontCare } else if (sources.size == 1) { sink :<>= sources.head._2 } else { val pairs = sources.toList val beatsIn = pairs.map(_._1) val sourcesIn = pairs.map(_._2) // The number of beats which remain to be sent val beatsLeft = RegInit(0.U) val idle = beatsLeft === 0.U val latch = idle && sink.ready // winner (if any) claims sink // Who wants access to the sink? val valids = sourcesIn.map(_.valid) // Arbitrate amongst the requests val readys = VecInit(policy(valids.size, Cat(valids.reverse), latch).asBools) // Which request wins arbitration? 
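// For example, with four sources and the lowestIndexFirst policy, valids = b0110
// gives readys = ~((leftOR(b0110) << 1)(3, 0)) = ~b1100 = b0011, so the winner
// mask computed below is b0010: only the lowest-indexed valid source, source 1,
// is granted.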
val winner = VecInit((readys zip valids) map { case (r,v) => r&&v }) // Confirm the policy works properly require (readys.size == valids.size) // Never two winners val prefixOR = winner.scanLeft(false.B)(_||_).init assert((prefixOR zip winner) map { case (p,w) => !p || !w } reduce {_ && _}) // If there was any request, there is a winner assert (!valids.reduce(_||_) || winner.reduce(_||_)) // Track remaining beats val maskedBeats = (winner zip beatsIn) map { case (w,b) => Mux(w, b, 0.U) } val initBeats = maskedBeats.reduce(_ | _) // no winner => 0 beats beatsLeft := Mux(latch, initBeats, beatsLeft - sink.fire) // The one-hot source granted access in the previous cycle val state = RegInit(VecInit(Seq.fill(sources.size)(false.B))) val muxState = Mux(idle, winner, state) state := muxState val allowed = Mux(idle, readys, state) (sourcesIn zip allowed) foreach { case (s, r) => s.ready := sink.ready && r } sink.valid := Mux(idle, valids.reduce(_||_), Mux1H(state, valids)) sink.bits :<= Mux1H(muxState, sourcesIn.map(_.bits)) } } } // Synthesizable unit tests import freechips.rocketchip.unittest._ abstract class DecoupledArbiterTest( policy: TLArbiter.Policy, txns: Int, timeout: Int, val numSources: Int, beatsLeftFromIdx: Int => UInt) (implicit p: Parameters) extends UnitTest(timeout) { val sources = Wire(Vec(numSources, DecoupledIO(UInt(log2Ceil(numSources).W)))) dontTouch(sources.suggestName("sources")) val sink = Wire(DecoupledIO(UInt(log2Ceil(numSources).W))) dontTouch(sink.suggestName("sink")) val count = RegInit(0.U(log2Ceil(txns).W)) val lfsr = LFSR(16, true.B) sources.zipWithIndex.map { case (z, i) => z.bits := i.U } TLArbiter(policy)(sink, sources.zipWithIndex.map { case (z, i) => (beatsLeftFromIdx(i), z) }:_*) count := count + 1.U io.finished := count >= txns.U } /** This tests that when a specific pattern of source valids are driven, * a new index from amongst that pattern is always selected, * unless one of those sources takes multiple beats, * in which case the same index should be selected until the arbiter goes idle. */ class TLDecoupledArbiterRobinTest(txns: Int = 128, timeout: Int = 500000, print: Boolean = false) (implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.roundRobin, txns, timeout, 6, i => i.U) { val lastWinner = RegInit((numSources+1).U) val beatsLeft = RegInit(0.U(log2Ceil(numSources).W)) val first = lastWinner > numSources.U val valid = lfsr(0) val ready = lfsr(15) sink.ready := ready sources.zipWithIndex.map { // pattern: every even-indexed valid is driven the same random way case (s, i) => s.valid := (if (i % 2 == 1) false.B else valid) } when (sink.fire) { if (print) { printf("TestRobin: %d\n", sink.bits) } when (beatsLeft === 0.U) { assert(lastWinner =/= sink.bits, "Round robin did not pick a new idx despite one being valid.") lastWinner := sink.bits beatsLeft := sink.bits } .otherwise { assert(lastWinner === sink.bits, "Round robin did not pick the same index over multiple beats") beatsLeft := beatsLeft - 1.U } } if (print) { when (!sink.fire) { printf("TestRobin: idle (%d %d)\n", valid, ready) } } } /** This tests that the lowest index is always selected across random single cycle transactions. 
*/ class TLDecoupledArbiterLowestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.lowestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertLowest(id: Int): Unit = { when (sources(id).valid) { assert((numSources-1 until id by -1).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a higher valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertLowest(_)) } } /** This tests that the highest index is always selected across random single cycle transactions. */ class TLDecoupledArbiterHighestTest(txns: Int = 128, timeout: Int = 500000)(implicit p: Parameters) extends DecoupledArbiterTest(TLArbiter.highestIndexFirst, txns, timeout, 15, _ => 0.U) { def assertHighest(id: Int): Unit = { when (sources(id).valid) { assert((0 until id).map(!sources(_).fire).foldLeft(true.B)(_&&_), s"$id was valid but a lower valid source was granted ready.") } } sources.zipWithIndex.map { case (s, i) => s.valid := lfsr(i) } sink.ready := lfsr(15) when (sink.fire) { (0 until numSources).foreach(assertHighest(_)) } } File Xbar.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressDecoder, AddressSet, RegionType, IdRange, TriStateValue} import freechips.rocketchip.util.BundleField // Trades off slave port proximity against routing resource cost object ForceFanout { def apply[T]( a: TriStateValue = TriStateValue.unset, b: TriStateValue = TriStateValue.unset, c: TriStateValue = TriStateValue.unset, d: TriStateValue = TriStateValue.unset, e: TriStateValue = TriStateValue.unset)(body: Parameters => T)(implicit p: Parameters) = { body(p.alterPartial { case ForceFanoutKey => p(ForceFanoutKey) match { case ForceFanoutParams(pa, pb, pc, pd, pe) => ForceFanoutParams(a.update(pa), b.update(pb), c.update(pc), d.update(pd), e.update(pe)) } }) } } private case class ForceFanoutParams(a: Boolean, b: Boolean, c: Boolean, d: Boolean, e: Boolean) private case object ForceFanoutKey extends Field(ForceFanoutParams(false, false, false, false, false)) class TLXbar(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters) extends LazyModule { val node = new TLNexusNode( clientFn = { seq => seq(0).v1copy( echoFields = BundleField.union(seq.flatMap(_.echoFields)), requestFields = BundleField.union(seq.flatMap(_.requestFields)), responseKeys = seq.flatMap(_.responseKeys).distinct, minLatency = seq.map(_.minLatency).min, clients = (TLXbar.mapInputIds(seq) zip seq) flatMap { case (range, port) => port.clients map { client => client.v1copy( sourceId = client.sourceId.shift(range.start) )} } ) }, managerFn = { seq => val fifoIdFactory = TLXbar.relabeler() seq(0).v1copy( responseFields = BundleField.union(seq.flatMap(_.responseFields)), requestKeys = seq.flatMap(_.requestKeys).distinct, minLatency = seq.map(_.minLatency).min, endSinkId = TLXbar.mapOutputIds(seq).map(_.end).max, managers = seq.flatMap { port => require (port.beatBytes == seq(0).beatBytes, s"Xbar ($name with parent $parent) data widths don't match: ${port.managers.map(_.name)} has ${port.beatBytes}B vs ${seq(0).managers.map(_.name)} has ${seq(0).beatBytes}B") val fifoIdMapper = fifoIdFactory() port.managers map { manager => manager.v1copy( 
fifoId = manager.fifoId.map(fifoIdMapper(_)) )} } ) } ){ override def circuitIdentity = outputs.size == 1 && inputs.size == 1 } lazy val module = new Impl class Impl extends LazyModuleImp(this) { if ((node.in.size * node.out.size) > (8*32)) { println (s"!!! WARNING !!!") println (s" Your TLXbar ($name with parent $parent) is very large, with ${node.in.size} Masters and ${node.out.size} Slaves.") println (s"!!! WARNING !!!") } val wide_bundle = TLBundleParameters.union((node.in ++ node.out).map(_._2.bundle)) override def desiredName = (Seq("TLXbar") ++ nameSuffix ++ Seq(s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}")).mkString("_") TLXbar.circuit(policy, node.in, node.out) } } object TLXbar { def mapInputIds(ports: Seq[TLMasterPortParameters]) = assignRanges(ports.map(_.endSourceId)) def mapOutputIds(ports: Seq[TLSlavePortParameters]) = assignRanges(ports.map(_.endSinkId)) def assignRanges(sizes: Seq[Int]) = { val pow2Sizes = sizes.map { z => if (z == 0) 0 else 1 << log2Ceil(z) } val tuples = pow2Sizes.zipWithIndex.sortBy(_._1) // record old index, then sort by increasing size val starts = tuples.scanRight(0)(_._1 + _).tail // suffix-sum of the sizes = the start positions val ranges = (tuples zip starts) map { case ((sz, i), st) => (if (sz == 0) IdRange(0, 0) else IdRange(st, st + sz), i) } ranges.sortBy(_._2).map(_._1) // Restore orignal order } def relabeler() = { var idFactory = 0 () => { val fifoMap = scala.collection.mutable.HashMap.empty[Int, Int] (x: Int) => { if (fifoMap.contains(x)) fifoMap(x) else { val out = idFactory idFactory = idFactory + 1 fifoMap += (x -> out) out } } } } def circuit(policy: TLArbiter.Policy, seqIn: Seq[(TLBundle, TLEdge)], seqOut: Seq[(TLBundle, TLEdge)]) { val (io_in, edgesIn) = seqIn.unzip val (io_out, edgesOut) = seqOut.unzip // Not every master need connect to every slave on every channel; determine which connections are necessary val reachableIO = edgesIn.map { cp => edgesOut.map { mp => cp.client.clients.exists { c => mp.manager.managers.exists { m => c.visibility.exists { ca => m.address.exists { ma => ca.overlaps(ma)}}}} }.toVector}.toVector val probeIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.managers.exists(_.regionType >= RegionType.TRACKED) }.toVector}.toVector val releaseIO = (edgesIn zip reachableIO).map { case (cp, reachableO) => (edgesOut zip reachableO).map { case (mp, reachable) => reachable && cp.client.anySupportProbe && mp.manager.anySupportAcquireB }.toVector}.toVector val connectAIO = reachableIO val connectBIO = probeIO val connectCIO = releaseIO val connectDIO = reachableIO val connectEIO = releaseIO def transpose[T](x: Seq[Seq[T]]) = if (x.isEmpty) Nil else Vector.tabulate(x(0).size) { i => Vector.tabulate(x.size) { j => x(j)(i) } } val connectAOI = transpose(connectAIO) val connectBOI = transpose(connectBIO) val connectCOI = transpose(connectCIO) val connectDOI = transpose(connectDIO) val connectEOI = transpose(connectEIO) // Grab the port ID mapping val inputIdRanges = TLXbar.mapInputIds(edgesIn.map(_.client)) val outputIdRanges = TLXbar.mapOutputIds(edgesOut.map(_.manager)) // We need an intermediate size of bundle with the widest possible identifiers val wide_bundle = TLBundleParameters.union(io_in.map(_.params) ++ io_out.map(_.params)) // Handle size = 1 gracefully (Chisel3 empty range is broken) def trim(id: UInt, size: Int): UInt = if (size <= 1) 0.U else id(log2Ceil(size)-1, 0) // 
Transform input bundle sources (sinks use global namespace on both sides) val in = Wire(Vec(io_in.size, TLBundle(wide_bundle))) for (i <- 0 until in.size) { val r = inputIdRanges(i) if (connectAIO(i).exists(x=>x)) { in(i).a.bits.user := DontCare in(i).a.squeezeAll.waiveAll :<>= io_in(i).a.squeezeAll.waiveAll in(i).a.bits.source := io_in(i).a.bits.source | r.start.U } else { in(i).a := DontCare io_in(i).a := DontCare in(i).a.valid := false.B io_in(i).a.ready := true.B } if (connectBIO(i).exists(x=>x)) { io_in(i).b.squeezeAll :<>= in(i).b.squeezeAll io_in(i).b.bits.source := trim(in(i).b.bits.source, r.size) } else { in(i).b := DontCare io_in(i).b := DontCare in(i).b.ready := true.B io_in(i).b.valid := false.B } if (connectCIO(i).exists(x=>x)) { in(i).c.bits.user := DontCare in(i).c.squeezeAll.waiveAll :<>= io_in(i).c.squeezeAll.waiveAll in(i).c.bits.source := io_in(i).c.bits.source | r.start.U } else { in(i).c := DontCare io_in(i).c := DontCare in(i).c.valid := false.B io_in(i).c.ready := true.B } if (connectDIO(i).exists(x=>x)) { io_in(i).d.squeezeAll.waiveAll :<>= in(i).d.squeezeAll.waiveAll io_in(i).d.bits.source := trim(in(i).d.bits.source, r.size) } else { in(i).d := DontCare io_in(i).d := DontCare in(i).d.ready := true.B io_in(i).d.valid := false.B } if (connectEIO(i).exists(x=>x)) { in(i).e.squeezeAll :<>= io_in(i).e.squeezeAll } else { in(i).e := DontCare io_in(i).e := DontCare in(i).e.valid := false.B io_in(i).e.ready := true.B } } // Transform output bundle sinks (sources use global namespace on both sides) val out = Wire(Vec(io_out.size, TLBundle(wide_bundle))) for (o <- 0 until out.size) { val r = outputIdRanges(o) if (connectAOI(o).exists(x=>x)) { out(o).a.bits.user := DontCare io_out(o).a.squeezeAll.waiveAll :<>= out(o).a.squeezeAll.waiveAll } else { out(o).a := DontCare io_out(o).a := DontCare out(o).a.ready := true.B io_out(o).a.valid := false.B } if (connectBOI(o).exists(x=>x)) { out(o).b.squeezeAll :<>= io_out(o).b.squeezeAll } else { out(o).b := DontCare io_out(o).b := DontCare out(o).b.valid := false.B io_out(o).b.ready := true.B } if (connectCOI(o).exists(x=>x)) { out(o).c.bits.user := DontCare io_out(o).c.squeezeAll.waiveAll :<>= out(o).c.squeezeAll.waiveAll } else { out(o).c := DontCare io_out(o).c := DontCare out(o).c.ready := true.B io_out(o).c.valid := false.B } if (connectDOI(o).exists(x=>x)) { out(o).d.squeezeAll :<>= io_out(o).d.squeezeAll out(o).d.bits.sink := io_out(o).d.bits.sink | r.start.U } else { out(o).d := DontCare io_out(o).d := DontCare out(o).d.valid := false.B io_out(o).d.ready := true.B } if (connectEOI(o).exists(x=>x)) { io_out(o).e.squeezeAll :<>= out(o).e.squeezeAll io_out(o).e.bits.sink := trim(out(o).e.bits.sink, r.size) } else { out(o).e := DontCare io_out(o).e := DontCare out(o).e.ready := true.B io_out(o).e.valid := false.B } } // Filter a list to only those elements selected def filter[T](data: Seq[T], mask: Seq[Boolean]) = (data zip mask).filter(_._2).map(_._1) // Based on input=>output connectivity, create per-input minimal address decode circuits val requiredAC = (connectAIO ++ connectCIO).distinct val outputPortFns: Map[Vector[Boolean], Seq[UInt => Bool]] = requiredAC.map { connectO => val port_addrs = edgesOut.map(_.manager.managers.flatMap(_.address)) val routingMask = AddressDecoder(filter(port_addrs, connectO)) val route_addrs = port_addrs.map(seq => AddressSet.unify(seq.map(_.widen(~routingMask)).distinct)) // Print the address mapping if (false) { println("Xbar mapping:") route_addrs.foreach { p => print(" ") p.foreach { a => 
print(s" ${a}") } println("") } println("--") } (connectO, route_addrs.map(seq => (addr: UInt) => seq.map(_.contains(addr)).reduce(_ || _))) }.toMap // Print the ID mapping if (false) { println(s"XBar mapping:") (edgesIn zip inputIdRanges).zipWithIndex.foreach { case ((edge, id), i) => println(s"\t$i assigned ${id} for ${edge.client.clients.map(_.name).mkString(", ")}") } println("") } val addressA = (in zip edgesIn) map { case (i, e) => e.address(i.a.bits) } val addressC = (in zip edgesIn) map { case (i, e) => e.address(i.c.bits) } def unique(x: Vector[Boolean]): Bool = (x.filter(x=>x).size <= 1).B val requestAIO = (connectAIO zip addressA) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestCIO = (connectCIO zip addressC) map { case (c, i) => outputPortFns(c).map { o => unique(c) || o(i) } } val requestBOI = out.map { o => inputIdRanges.map { i => i.contains(o.b.bits.source) } } val requestDOI = out.map { o => inputIdRanges.map { i => i.contains(o.d.bits.source) } } val requestEIO = in.map { i => outputIdRanges.map { o => o.contains(i.e.bits.sink) } } val beatsAI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.a.bits) } val beatsBO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.b.bits) } val beatsCI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.c.bits) } val beatsDO = (out zip edgesOut) map { case (o, e) => e.numBeats1(o.d.bits) } val beatsEI = (in zip edgesIn) map { case (i, e) => e.numBeats1(i.e.bits) } // Fanout the input sources to the output sinks val portsAOI = transpose((in zip requestAIO) map { case (i, r) => TLXbar.fanout(i.a, r, edgesOut.map(_.params(ForceFanoutKey).a)) }) val portsBIO = transpose((out zip requestBOI) map { case (o, r) => TLXbar.fanout(o.b, r, edgesIn .map(_.params(ForceFanoutKey).b)) }) val portsCOI = transpose((in zip requestCIO) map { case (i, r) => TLXbar.fanout(i.c, r, edgesOut.map(_.params(ForceFanoutKey).c)) }) val portsDIO = transpose((out zip requestDOI) map { case (o, r) => TLXbar.fanout(o.d, r, edgesIn .map(_.params(ForceFanoutKey).d)) }) val portsEOI = transpose((in zip requestEIO) map { case (i, r) => TLXbar.fanout(i.e, r, edgesOut.map(_.params(ForceFanoutKey).e)) }) // Arbitrate amongst the sources for (o <- 0 until out.size) { TLArbiter(policy)(out(o).a, filter(beatsAI zip portsAOI(o), connectAOI(o)):_*) TLArbiter(policy)(out(o).c, filter(beatsCI zip portsCOI(o), connectCOI(o)):_*) TLArbiter(policy)(out(o).e, filter(beatsEI zip portsEOI(o), connectEOI(o)):_*) filter(portsAOI(o), connectAOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsCOI(o), connectCOI(o).map(!_)) foreach { r => r.ready := false.B } filter(portsEOI(o), connectEOI(o).map(!_)) foreach { r => r.ready := false.B } } for (i <- 0 until in.size) { TLArbiter(policy)(in(i).b, filter(beatsBO zip portsBIO(i), connectBIO(i)):_*) TLArbiter(policy)(in(i).d, filter(beatsDO zip portsDIO(i), connectDIO(i)):_*) filter(portsBIO(i), connectBIO(i).map(!_)) foreach { r => r.ready := false.B } filter(portsDIO(i), connectDIO(i).map(!_)) foreach { r => r.ready := false.B } } } def apply(policy: TLArbiter.Policy = TLArbiter.roundRobin, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { val xbar = LazyModule(new TLXbar(policy, nameSuffix)) xbar.node } // Replicate an input port to each output port def fanout[T <: TLChannel](input: DecoupledIO[T], select: Seq[Bool], force: Seq[Boolean] = Nil): Seq[DecoupledIO[T]] = { val filtered = Wire(Vec(select.size, chiselTypeOf(input))) for (i <- 0 until select.size) { 
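// Each replica keeps the input payload but gates valid with its select bit; when
// force(i) is set, the bits are routed through an IdentityModule, which appears
// intended to give that output its own physical copy of the payload (see the
// ForceFanout comment at the top of this file). A single-output crossbar is
// special-cased: (select.size == 1).B makes the lone replica follow input.valid
// unconditionally.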
filtered(i).bits := (if (force.lift(i).getOrElse(false)) IdentityModule(input.bits) else input.bits) filtered(i).valid := input.valid && (select(i) || (select.size == 1).B) } input.ready := Mux1H(select, filtered.map(_.ready)) filtered } } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMXbar(nManagers: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("Xbar")) val xbar = LazyModule(new TLXbar) xbar.node := TLDelayer(0.1) := model.node := fuzz.node (0 until nManagers) foreach { n => val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff))) ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node } lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMXbarTest(nManagers: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMXbar(nManagers,txns)).module) dut.io.start := io.start io.finished := dut.io.finished } class TLMulticlientXbar(nManagers: Int, nClients: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val xbar = LazyModule(new TLXbar) val fuzzers = (0 until nClients) map { n => val fuzz = LazyModule(new TLFuzzer(txns)) xbar.node := TLDelayer(0.1) := fuzz.node fuzz } (0 until nManagers) foreach { n => val ram = LazyModule(new TLRAM(AddressSet(0x0+0x400*n, 0x3ff))) ram.node := TLFragmenter(4, 256) := TLDelayer(0.1) := xbar.node } lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzzers.last.module.io.finished } } class TLMulticlientXbarTest(nManagers: Int, nClients: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLMulticlientXbar(nManagers, nClients, txns)).module) dut.io.start := io.start io.finished := dut.io.finished }
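// The module below is one elaborated instance of TLXbar. Its name comes from the
// desiredName override above, "TLXbar" ++ nameSuffix ++
// s"i${node.in.size}_o${node.out.size}_${wide_bundle.shortName}": "cbus_out" is
// the nameSuffix, i1_o8 records one master-side and eight slave-side ports, and
// a29d64s9k1z4u matches the widened bundle visible in the port list (29 address
// bits, 64 data bits, 9 source bits, 1 sink bit, a 4-bit size field), with the
// trailing "u" taken to indicate the presence of user fields.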
module TLXbar_cbus_out_i1_o8_a29d64s9k1z4u( // @[Xbar.scala:74:9] input clock, // @[Xbar.scala:74:9] input reset, // @[Xbar.scala:74:9] output auto_anon_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [28:0] auto_anon_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_anon_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_anon_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_7_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_7_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_7_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_7_a_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_out_7_a_bits_source, // @[LazyModuleImp.scala:107:25] output [20:0] auto_anon_out_7_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_7_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_7_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_7_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_7_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_7_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_7_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_7_d_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_out_7_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_7_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_7_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_6_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_6_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_6_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_6_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_6_a_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_out_6_a_bits_source, // @[LazyModuleImp.scala:107:25] output [16:0] auto_anon_out_6_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_6_a_bits_mask, // 
@[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_6_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_6_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_6_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_6_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_6_d_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_out_6_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_6_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_5_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_5_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_5_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_5_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_5_a_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_out_5_a_bits_source, // @[LazyModuleImp.scala:107:25] output [11:0] auto_anon_out_5_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_5_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_5_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_5_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_5_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_5_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_5_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_5_d_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_out_5_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_5_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_4_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_4_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_4_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_4_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_4_a_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_out_4_a_bits_source, // @[LazyModuleImp.scala:107:25] output [27:0] auto_anon_out_4_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_4_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_4_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_4_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_4_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_4_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_4_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_4_d_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_out_4_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_4_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_3_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_3_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_3_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_3_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_3_a_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_out_3_a_bits_source, // @[LazyModuleImp.scala:107:25] output [25:0] auto_anon_out_3_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_3_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_3_a_bits_data, // @[LazyModuleImp.scala:107:25] output 
auto_anon_out_3_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_3_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_3_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_3_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_3_d_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_out_3_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_3_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_2_a_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_out_2_a_bits_source, // @[LazyModuleImp.scala:107:25] output [28:0] auto_anon_out_2_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_2_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_2_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_2_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_2_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_2_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_2_d_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_out_2_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_2_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_2_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_1_a_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_out_1_a_bits_source, // @[LazyModuleImp.scala:107:25] output [25:0] auto_anon_out_1_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_1_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_1_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_1_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_1_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_1_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_1_d_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_out_1_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_1_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_1_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_a_ready, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] 
auto_anon_out_0_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_anon_out_0_a_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_anon_out_0_a_bits_size, // @[LazyModuleImp.scala:107:25] output [8:0] auto_anon_out_0_a_bits_source, // @[LazyModuleImp.scala:107:25] output [13:0] auto_anon_out_0_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_anon_out_0_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_anon_out_0_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_anon_out_0_d_ready, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_anon_out_0_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_anon_out_0_d_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_anon_out_0_d_bits_size, // @[LazyModuleImp.scala:107:25] input [8:0] auto_anon_out_0_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [63:0] auto_anon_out_0_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_anon_out_0_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire out_7_d_bits_sink; // @[Xbar.scala:216:19] wire [3:0] out_7_d_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_6_d_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_5_d_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_4_d_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_3_d_bits_size; // @[Xbar.scala:216:19] wire out_2_d_bits_sink; // @[Xbar.scala:216:19] wire [3:0] out_2_d_bits_size; // @[Xbar.scala:216:19] wire out_1_d_bits_sink; // @[Xbar.scala:216:19] wire [3:0] out_1_d_bits_size; // @[Xbar.scala:216:19] wire out_0_d_bits_sink; // @[Xbar.scala:216:19] wire [8:0] in_0_d_bits_source; // @[Xbar.scala:159:18] wire [8:0] in_0_a_bits_source; // @[Xbar.scala:159:18] wire auto_anon_in_a_valid_0 = auto_anon_in_a_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_a_bits_opcode_0 = auto_anon_in_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_a_bits_param_0 = auto_anon_in_a_bits_param; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_a_bits_size_0 = auto_anon_in_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_in_a_bits_source_0 = auto_anon_in_a_bits_source; // @[Xbar.scala:74:9] wire [28:0] auto_anon_in_a_bits_address_0 = auto_anon_in_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] auto_anon_in_a_bits_mask_0 = auto_anon_in_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_a_bits_data_0 = auto_anon_in_a_bits_data; // @[Xbar.scala:74:9] wire auto_anon_in_a_bits_corrupt_0 = auto_anon_in_a_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_in_d_ready_0 = auto_anon_in_d_ready; // @[Xbar.scala:74:9] wire auto_anon_out_7_a_ready_0 = auto_anon_out_7_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_valid_0 = auto_anon_out_7_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_d_bits_opcode_0 = auto_anon_out_7_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_7_d_bits_param_0 = auto_anon_out_7_d_bits_param; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_d_bits_size_0 = auto_anon_out_7_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_7_d_bits_source_0 = auto_anon_out_7_d_bits_source; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_bits_sink_0 = auto_anon_out_7_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_bits_denied_0 = 
auto_anon_out_7_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_7_d_bits_data_0 = auto_anon_out_7_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_bits_corrupt_0 = auto_anon_out_7_d_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_out_6_a_ready_0 = auto_anon_out_6_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_6_d_valid_0 = auto_anon_out_6_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_6_d_bits_size_0 = auto_anon_out_6_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_6_d_bits_source_0 = auto_anon_out_6_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_6_d_bits_data_0 = auto_anon_out_6_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_5_a_ready_0 = auto_anon_out_5_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_valid_0 = auto_anon_out_5_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_d_bits_opcode_0 = auto_anon_out_5_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_d_bits_size_0 = auto_anon_out_5_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_5_d_bits_source_0 = auto_anon_out_5_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_5_d_bits_data_0 = auto_anon_out_5_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_4_a_ready_0 = auto_anon_out_4_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_valid_0 = auto_anon_out_4_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_d_bits_opcode_0 = auto_anon_out_4_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_d_bits_size_0 = auto_anon_out_4_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_4_d_bits_source_0 = auto_anon_out_4_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_4_d_bits_data_0 = auto_anon_out_4_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_3_a_ready_0 = auto_anon_out_3_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_valid_0 = auto_anon_out_3_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_d_bits_opcode_0 = auto_anon_out_3_d_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_d_bits_size_0 = auto_anon_out_3_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_3_d_bits_source_0 = auto_anon_out_3_d_bits_source; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_3_d_bits_data_0 = auto_anon_out_3_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_ready_0 = auto_anon_out_2_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_valid_0 = auto_anon_out_2_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_d_bits_opcode_0 = auto_anon_out_2_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_2_d_bits_param_0 = auto_anon_out_2_d_bits_param; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_d_bits_size_0 = auto_anon_out_2_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_2_d_bits_source_0 = auto_anon_out_2_d_bits_source; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_sink_0 = auto_anon_out_2_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_denied_0 = auto_anon_out_2_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_2_d_bits_data_0 = auto_anon_out_2_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_bits_corrupt_0 = auto_anon_out_2_d_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_ready_0 = auto_anon_out_1_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_valid_0 = auto_anon_out_1_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_d_bits_opcode_0 = auto_anon_out_1_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_1_d_bits_param_0 = 
auto_anon_out_1_d_bits_param; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_d_bits_size_0 = auto_anon_out_1_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_1_d_bits_source_0 = auto_anon_out_1_d_bits_source; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_sink_0 = auto_anon_out_1_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_denied_0 = auto_anon_out_1_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_1_d_bits_data_0 = auto_anon_out_1_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_bits_corrupt_0 = auto_anon_out_1_d_bits_corrupt; // @[Xbar.scala:74:9] wire auto_anon_out_0_a_ready_0 = auto_anon_out_0_a_ready; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_valid_0 = auto_anon_out_0_d_valid; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_d_bits_opcode_0 = auto_anon_out_0_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_0_d_bits_param_0 = auto_anon_out_0_d_bits_param; // @[Xbar.scala:74:9] wire [3:0] auto_anon_out_0_d_bits_size_0 = auto_anon_out_0_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_0_d_bits_source_0 = auto_anon_out_0_d_bits_source; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_sink_0 = auto_anon_out_0_d_bits_sink; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_denied_0 = auto_anon_out_0_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_0_d_bits_data_0 = auto_anon_out_0_d_bits_data; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_bits_corrupt_0 = auto_anon_out_0_d_bits_corrupt; // @[Xbar.scala:74:9] wire _readys_T_2 = reset; // @[Arbiter.scala:22:12] wire [2:0] auto_anon_out_6_d_bits_opcode = 3'h1; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_5_d_bits_opcode = 3'h1; // @[MixedNode.scala:542:17] wire [2:0] out_6_d_bits_opcode = 3'h1; // @[Xbar.scala:216:19] wire [2:0] portsDIO_filtered_6_0_bits_opcode = 3'h1; // @[Xbar.scala:352:24] wire [1:0] auto_anon_out_6_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_5_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_4_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_out_3_d_bits_param = 2'h0; // @[Xbar.scala:74:9] wire [1:0] x1_anonOut_2_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] x1_anonOut_3_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] x1_anonOut_4_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] x1_anonOut_5_d_bits_param = 2'h0; // @[MixedNode.scala:542:17] wire [1:0] out_3_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] out_4_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] out_5_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] out_6_d_bits_param = 2'h0; // @[Xbar.scala:216:19] wire [1:0] _requestBOI_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_6_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_7_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_8_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_9_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_10_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] 
_requestBOI_WIRE_11_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_12_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_13_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _requestBOI_WIRE_14_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _requestBOI_WIRE_15_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_6_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_7_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_8_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_9_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_10_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_11_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_12_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_13_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _beatsBO_WIRE_14_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _beatsBO_WIRE_15_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] _portsBIO_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_1_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_2_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_3_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_1_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_4_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_5_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_2_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_6_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_7_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_3_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_8_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_9_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_4_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_10_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_11_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_5_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_12_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_13_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_6_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _portsBIO_WIRE_14_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [1:0] _portsBIO_WIRE_15_bits_param = 2'h0; // @[Bundles.scala:264:61] wire [1:0] portsBIO_filtered_7_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_3_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_4_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_5_0_bits_param = 2'h0; // 
@[Xbar.scala:352:24] wire [1:0] portsDIO_filtered_6_0_bits_param = 2'h0; // @[Xbar.scala:352:24] wire [1:0] _in_0_d_bits_T_93 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_94 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_95 = 2'h0; // @[Mux.scala:30:73] wire [1:0] _in_0_d_bits_T_96 = 2'h0; // @[Mux.scala:30:73] wire auto_anon_out_6_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_6_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_6_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_bits_sink = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_bits_denied = 1'h0; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_bits_corrupt = 1'h0; // @[Xbar.scala:74:9] wire x1_anonOut_2_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_4_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_4_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_4_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_bits_sink = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_bits_denied = 1'h0; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_bits_corrupt = 1'h0; // @[MixedNode.scala:542:17] wire out_3_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_3_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_3_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire out_4_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_4_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_4_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire out_5_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_5_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_5_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire out_6_d_bits_sink = 1'h0; // @[Xbar.scala:216:19] wire out_6_d_bits_denied = 1'h0; // @[Xbar.scala:216:19] wire out_6_d_bits_corrupt = 1'h0; // @[Xbar.scala:216:19] wire _out_3_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _out_4_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _out_5_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _out_6_d_bits_sink_T = 1'h0; // @[Xbar.scala:251:53] wire _addressC_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _addressC_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _addressC_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _requestBOI_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_1_valid = 1'h0; // 
@[Bundles.scala:264:61] wire _requestBOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_5 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_10 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_6_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_6_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_7_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_7_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_15 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_8_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_8_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_8_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_9_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_9_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_9_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_20 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_10_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_10_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_10_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_11_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_11_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_11_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_25 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_12_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_12_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_12_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_13_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_13_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_13_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_30 = 1'h0; // @[Parameters.scala:54:10] wire _requestBOI_WIRE_14_ready = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_14_valid = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_14_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _requestBOI_WIRE_15_ready = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_15_valid = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_WIRE_15_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _requestBOI_T_35 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_5 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_10 = 1'h0; 
// @[Parameters.scala:54:10] wire _requestDOI_T_15 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_20 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_25 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_30 = 1'h0; // @[Parameters.scala:54:10] wire _requestDOI_T_35 = 1'h0; // @[Parameters.scala:54:10] wire _requestEIO_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_2_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_3_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_4_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_4_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_4_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_5_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_5_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_5_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_6_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_6_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_6_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_7_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_7_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_7_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_8_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_8_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_8_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_9_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_9_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_9_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_10_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_10_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_10_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_11_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_11_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_11_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_12_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_12_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_12_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_13_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_13_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_13_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_14_ready = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_14_valid = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_14_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _requestEIO_WIRE_15_ready = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_15_valid = 1'h0; // @[Bundles.scala:267:61] wire _requestEIO_WIRE_15_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _beatsBO_WIRE_ready = 1'h0; 
// @[Bundles.scala:264:74] wire _beatsBO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_1 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_2 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_6_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_6_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_7_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_7_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_3 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_8_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_8_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_8_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_9_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_9_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_9_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_4 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_10_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_10_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_10_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_11_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_11_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_11_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_5 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_12_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_12_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_12_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_13_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_13_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_13_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_6 = 1'h0; // @[Edges.scala:97:37] wire _beatsBO_WIRE_14_ready = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_14_valid = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_14_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _beatsBO_WIRE_15_ready = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_15_valid = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_WIRE_15_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire _beatsBO_opdata_T_7 = 1'h0; // @[Edges.scala:97:37] wire _beatsCI_WIRE_ready = 1'h0; 
// @[Bundles.scala:265:74] wire _beatsCI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _beatsCI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _beatsCI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire beatsCI_opdata = 1'h0; // @[Edges.scala:102:36] wire _beatsEI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _beatsEI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _beatsEI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _beatsEI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire _portsBIO_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_1_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_1_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_2_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_2_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_3_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_3_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_1_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_1_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_1_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_3 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_4_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_4_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_5_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_5_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_2_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_2_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_2_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_5 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_6_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_6_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_6_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_7_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_7_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_7_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_3_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_3_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_3_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_7 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_8_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_8_valid = 1'h0; // @[Bundles.scala:264:74] wire 
_portsBIO_WIRE_8_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_9_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_9_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_9_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_4_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_4_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_4_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_9 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_10_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_10_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_10_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_11_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_11_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_11_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_5_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_5_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_5_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_11 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_12_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_12_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_12_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_13_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_13_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_13_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_6_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_6_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_6_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_13 = 1'h0; // @[Xbar.scala:355:40] wire _portsBIO_WIRE_14_ready = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_14_valid = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_14_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire _portsBIO_WIRE_15_ready = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_15_valid = 1'h0; // @[Bundles.scala:264:61] wire _portsBIO_WIRE_15_bits_corrupt = 1'h0; // @[Bundles.scala:264:61] wire portsBIO_filtered_7_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_7_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsBIO_filtered_7_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsBIO_filtered_0_valid_T_15 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _portsCOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _portsCOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _portsCOI_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire portsCOI_filtered_0_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_1_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_1_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_2_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_3_ready = 1'h0; // 
@[Xbar.scala:352:24] wire portsCOI_filtered_3_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_3_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_4_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_4_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_4_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_5_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_5_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_5_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_6_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_6_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_6_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_7_ready = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_7_valid = 1'h0; // @[Xbar.scala:352:24] wire portsCOI_filtered_7_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsCOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_2_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_3_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_4_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_5_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_6_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_filtered_7_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsCOI_T = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_1 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_2 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_3 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_4 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_5 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_6 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_7 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_8 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_9 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_10 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_11 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_12 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_13 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_T_14 = 1'h0; // @[Mux.scala:30:73] wire _portsCOI_WIRE_2 = 1'h0; // @[Mux.scala:30:73] wire portsDIO_filtered_3_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_3_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_3_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_4_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_4_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_4_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_5_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_5_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_5_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_6_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_6_0_bits_denied = 1'h0; // @[Xbar.scala:352:24] wire portsDIO_filtered_6_0_bits_corrupt = 1'h0; // @[Xbar.scala:352:24] wire _portsEOI_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_bits_sink = 1'h0; // @[Bundles.scala:267:74] wire _portsEOI_WIRE_1_ready = 1'h0; // @[Bundles.scala:267:61] wire _portsEOI_WIRE_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _portsEOI_WIRE_1_bits_sink = 1'h0; // @[Bundles.scala:267:61] wire portsEOI_filtered_0_ready = 1'h0; // 
@[Xbar.scala:352:24] wire portsEOI_filtered_0_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_0_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_1_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_2_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_3_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_3_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_3_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_4_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_4_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_4_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_5_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_5_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_5_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_6_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_6_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_6_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_7_ready = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_7_valid = 1'h0; // @[Xbar.scala:352:24] wire portsEOI_filtered_7_bits_sink = 1'h0; // @[Xbar.scala:352:24] wire _portsEOI_filtered_0_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_0_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_1_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_1_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_2_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_2_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_3_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_3_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_4_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_4_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_5_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_5_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_6_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_6_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_filtered_7_valid_T = 1'h0; // @[Xbar.scala:355:54] wire _portsEOI_filtered_7_valid_T_1 = 1'h0; // @[Xbar.scala:355:40] wire _portsEOI_T = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_1 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_2 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_3 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_4 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_5 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_6 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_7 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_8 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_9 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_10 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_11 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_12 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_13 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_T_14 = 1'h0; // @[Mux.scala:30:73] wire _portsEOI_WIRE_2 = 1'h0; // @[Mux.scala:30:73] wire _state_WIRE_0 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_1 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_2 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_3 = 1'h0; // 
@[Arbiter.scala:88:34] wire _state_WIRE_4 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_5 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_6 = 1'h0; // @[Arbiter.scala:88:34] wire _state_WIRE_7 = 1'h0; // @[Arbiter.scala:88:34] wire _in_0_d_bits_T_3 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_4 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_5 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_6 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_33 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_34 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_35 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_36 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_48 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_49 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_50 = 1'h0; // @[Mux.scala:30:73] wire _in_0_d_bits_T_51 = 1'h0; // @[Mux.scala:30:73] wire _requestCIO_T_4 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_0 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_9 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_1 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_14 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_2 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_19 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_3 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_24 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_4 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_29 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_5 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_34 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_6 = 1'h1; // @[Xbar.scala:308:107] wire _requestCIO_T_39 = 1'h1; // @[Parameters.scala:137:59] wire requestCIO_0_7 = 1'h1; // @[Xbar.scala:308:107] wire _requestBOI_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_4 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_0_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_6 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_7 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_8 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_9 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_1_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_11 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_12 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_13 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_14 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_2_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_16 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_17 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_18 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_19 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_3_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_21 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_22 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_23 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_24 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_4_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_26 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_28 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_29 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_5_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_31 = 1'h1; // 
@[Parameters.scala:54:32] wire _requestBOI_T_32 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_33 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_34 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_6_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestBOI_T_36 = 1'h1; // @[Parameters.scala:54:32] wire _requestBOI_T_37 = 1'h1; // @[Parameters.scala:56:32] wire _requestBOI_T_38 = 1'h1; // @[Parameters.scala:54:67] wire _requestBOI_T_39 = 1'h1; // @[Parameters.scala:57:20] wire requestBOI_7_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_4 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_0_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_6 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_7 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_8 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_9 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_1_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_11 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_12 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_13 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_14 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_2_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_16 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_17 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_18 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_19 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_3_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_21 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_22 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_23 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_24 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_4_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_26 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_27 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_28 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_29 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_5_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_31 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_32 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_33 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_34 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_6_0 = 1'h1; // @[Parameters.scala:56:48] wire _requestDOI_T_36 = 1'h1; // @[Parameters.scala:54:32] wire _requestDOI_T_37 = 1'h1; // @[Parameters.scala:56:32] wire _requestDOI_T_38 = 1'h1; // @[Parameters.scala:54:67] wire _requestDOI_T_39 = 1'h1; // @[Parameters.scala:57:20] wire requestDOI_7_0 = 1'h1; // @[Parameters.scala:56:48] wire beatsBO_opdata = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_1 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_2 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_3 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_4 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_5 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_6 = 1'h1; // @[Edges.scala:97:28] wire beatsBO_opdata_7 = 1'h1; // @[Edges.scala:97:28] wire beatsDO_opdata_6 = 1'h1; // @[Edges.scala:106:36] wire _portsBIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_4 = 1'h1; // 
@[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_6 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_8 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_10 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_12 = 1'h1; // @[Xbar.scala:355:54] wire _portsBIO_filtered_0_valid_T_14 = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_1_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_2_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_3_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_4_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_5_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_6_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsCOI_filtered_7_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_2 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_4 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_6 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_8 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_10 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_12 = 1'h1; // @[Xbar.scala:355:54] wire _portsDIO_filtered_0_valid_T_14 = 1'h1; // @[Xbar.scala:355:54] wire [63:0] _addressC_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _addressC_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _requestBOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_8_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_9_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_10_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_11_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_12_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_13_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _requestBOI_WIRE_14_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _requestBOI_WIRE_15_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_8_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_9_bits_data = 64'h0; // 
@[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_10_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_11_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_12_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_13_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsBO_WIRE_14_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _beatsBO_WIRE_15_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] _beatsCI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _beatsCI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _portsBIO_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_1_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_2_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_6_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_7_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_3_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_8_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_9_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_4_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_10_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_11_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_5_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_12_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_13_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_6_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsBIO_WIRE_14_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [63:0] _portsBIO_WIRE_15_bits_data = 64'h0; // @[Bundles.scala:264:61] wire [63:0] portsBIO_filtered_7_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] _portsCOI_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _portsCOI_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] portsCOI_filtered_0_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_1_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_2_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_3_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_4_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_5_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_6_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [63:0] portsCOI_filtered_7_bits_data = 64'h0; // @[Xbar.scala:352:24] wire [28:0] _addressC_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _addressC_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _requestCIO_T = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestCIO_T_5 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestCIO_T_10 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] 
_requestCIO_T_15 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestCIO_T_20 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestCIO_T_25 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestCIO_T_30 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestCIO_T_35 = 29'h0; // @[Parameters.scala:137:31] wire [28:0] _requestBOI_WIRE_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_6_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_7_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_8_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_9_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_10_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_11_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_12_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_13_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _requestBOI_WIRE_14_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _requestBOI_WIRE_15_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_6_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_7_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_8_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_9_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_10_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_11_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_12_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_13_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsBO_WIRE_14_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _beatsBO_WIRE_15_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] _beatsCI_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _beatsCI_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _portsBIO_WIRE_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_1_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:264:74] wire 
[28:0] _portsBIO_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_2_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_6_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_7_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_3_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_8_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_9_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_4_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_10_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_11_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_5_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_12_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_13_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_6_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsBIO_WIRE_14_bits_address = 29'h0; // @[Bundles.scala:264:74] wire [28:0] _portsBIO_WIRE_15_bits_address = 29'h0; // @[Bundles.scala:264:61] wire [28:0] portsBIO_filtered_7_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] _portsCOI_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _portsCOI_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] portsCOI_filtered_0_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_1_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_2_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_3_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_4_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_5_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_6_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [28:0] portsCOI_filtered_7_bits_address = 29'h0; // @[Xbar.scala:352:24] wire [8:0] _addressC_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _addressC_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _requestBOI_WIRE_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _requestBOI_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _requestBOI_uncommonBits_T = 9'h0; // @[Parameters.scala:52:29] wire [8:0] requestBOI_uncommonBits = 9'h0; // @[Parameters.scala:52:56] wire [8:0] _requestBOI_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _requestBOI_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _requestBOI_uncommonBits_T_1 = 9'h0; // @[Parameters.scala:52:29] wire [8:0] requestBOI_uncommonBits_1 = 9'h0; // @[Parameters.scala:52:56] wire [8:0] _requestBOI_WIRE_4_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _requestBOI_WIRE_5_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _requestBOI_uncommonBits_T_2 = 9'h0; // @[Parameters.scala:52:29] wire [8:0] requestBOI_uncommonBits_2 = 9'h0; // @[Parameters.scala:52:56] wire [8:0] _requestBOI_WIRE_6_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _requestBOI_WIRE_7_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _requestBOI_uncommonBits_T_3 = 9'h0; // @[Parameters.scala:52:29] wire [8:0] requestBOI_uncommonBits_3 = 9'h0; // @[Parameters.scala:52:56] wire [8:0] _requestBOI_WIRE_8_bits_source = 9'h0; // 
@[Bundles.scala:264:74] wire [8:0] _requestBOI_WIRE_9_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _requestBOI_uncommonBits_T_4 = 9'h0; // @[Parameters.scala:52:29] wire [8:0] requestBOI_uncommonBits_4 = 9'h0; // @[Parameters.scala:52:56] wire [8:0] _requestBOI_WIRE_10_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _requestBOI_WIRE_11_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _requestBOI_uncommonBits_T_5 = 9'h0; // @[Parameters.scala:52:29] wire [8:0] requestBOI_uncommonBits_5 = 9'h0; // @[Parameters.scala:52:56] wire [8:0] _requestBOI_WIRE_12_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _requestBOI_WIRE_13_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _requestBOI_uncommonBits_T_6 = 9'h0; // @[Parameters.scala:52:29] wire [8:0] requestBOI_uncommonBits_6 = 9'h0; // @[Parameters.scala:52:56] wire [8:0] _requestBOI_WIRE_14_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _requestBOI_WIRE_15_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _requestBOI_uncommonBits_T_7 = 9'h0; // @[Parameters.scala:52:29] wire [8:0] requestBOI_uncommonBits_7 = 9'h0; // @[Parameters.scala:52:56] wire [8:0] _beatsBO_WIRE_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _beatsBO_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] beatsBO_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] beatsBO_0 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _beatsBO_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _beatsBO_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _beatsBO_WIRE_4_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _beatsBO_WIRE_5_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _beatsBO_WIRE_6_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _beatsBO_WIRE_7_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _beatsBO_WIRE_8_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _beatsBO_WIRE_9_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _beatsBO_WIRE_10_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _beatsBO_WIRE_11_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _beatsBO_WIRE_12_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _beatsBO_WIRE_13_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _beatsBO_WIRE_14_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _beatsBO_WIRE_15_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] _beatsCI_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _beatsCI_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] beatsCI_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] beatsCI_0 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _portsBIO_WIRE_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _portsBIO_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] portsBIO_filtered_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] _portsBIO_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _portsBIO_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] portsBIO_filtered_1_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] _portsBIO_WIRE_4_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _portsBIO_WIRE_5_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] portsBIO_filtered_2_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] _portsBIO_WIRE_6_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _portsBIO_WIRE_7_bits_source = 9'h0; // 
@[Bundles.scala:264:61] wire [8:0] portsBIO_filtered_3_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] _portsBIO_WIRE_8_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _portsBIO_WIRE_9_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] portsBIO_filtered_4_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] _portsBIO_WIRE_10_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _portsBIO_WIRE_11_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] portsBIO_filtered_5_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] _portsBIO_WIRE_12_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _portsBIO_WIRE_13_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] portsBIO_filtered_6_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] _portsBIO_WIRE_14_bits_source = 9'h0; // @[Bundles.scala:264:74] wire [8:0] _portsBIO_WIRE_15_bits_source = 9'h0; // @[Bundles.scala:264:61] wire [8:0] portsBIO_filtered_7_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] _portsCOI_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _portsCOI_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] portsCOI_filtered_0_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] portsCOI_filtered_1_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] portsCOI_filtered_2_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] portsCOI_filtered_3_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] portsCOI_filtered_4_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] portsCOI_filtered_5_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] portsCOI_filtered_6_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [8:0] portsCOI_filtered_7_bits_source = 9'h0; // @[Xbar.scala:352:24] wire [3:0] _addressC_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _addressC_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _requestBOI_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_8_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_9_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_10_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_11_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_12_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_13_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _requestBOI_WIRE_14_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _requestBOI_WIRE_15_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] 
_beatsBO_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_8_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_9_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_10_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_11_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_12_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_13_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsBO_WIRE_14_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _beatsBO_WIRE_15_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] _beatsCI_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _beatsCI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _portsBIO_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_1_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_2_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_6_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_7_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_3_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_8_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_9_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_4_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_10_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_11_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_5_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_12_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_13_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_6_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsBIO_WIRE_14_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [3:0] _portsBIO_WIRE_15_bits_size = 4'h0; // @[Bundles.scala:264:61] wire [3:0] portsBIO_filtered_7_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] _portsCOI_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _portsCOI_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] portsCOI_filtered_0_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_1_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_2_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_3_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_4_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_5_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_6_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [3:0] portsCOI_filtered_7_bits_size = 4'h0; // @[Xbar.scala:352:24] wire [2:0] _addressC_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _addressC_WIRE_1_bits_opcode = 3'h0; // 
@[Bundles.scala:265:61] wire [2:0] _addressC_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _requestBOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_8_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_9_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_10_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_11_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_12_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_13_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _requestBOI_WIRE_14_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _requestBOI_WIRE_15_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsBO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] _beatsBO_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_1 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_2 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_2 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_3 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_3 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_8_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_9_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_4 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_4 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_10_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_11_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_5 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_5 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_12_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_13_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_6 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_6 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsBO_WIRE_14_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _beatsBO_WIRE_15_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] beatsBO_decode_7 = 3'h0; // @[Edges.scala:220:59] wire [2:0] beatsBO_7 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _beatsCI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _beatsCI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_beatsCI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsBIO_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_1_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_2_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_6_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_7_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_3_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_8_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_9_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_4_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_10_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_11_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_5_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_12_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_13_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_6_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsBIO_WIRE_14_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [2:0] _portsBIO_WIRE_15_bits_opcode = 3'h0; // @[Bundles.scala:264:61] wire [2:0] portsBIO_filtered_7_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] _portsCOI_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _portsCOI_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _portsCOI_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] portsCOI_filtered_0_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_0_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_1_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_1_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_2_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_2_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_3_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_3_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_4_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_4_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_5_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_5_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_6_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_6_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_7_bits_opcode = 3'h0; // @[Xbar.scala:352:24] wire [2:0] portsCOI_filtered_7_bits_param = 3'h0; // @[Xbar.scala:352:24] wire [7:0] _requestBOI_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_2_bits_mask 
= 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_6_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_7_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_8_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_9_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_10_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_11_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_12_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_13_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _requestBOI_WIRE_14_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _requestBOI_WIRE_15_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_6_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_7_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_8_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_9_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_10_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_11_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_12_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_13_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _beatsBO_WIRE_14_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _beatsBO_WIRE_15_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] _portsBIO_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_1_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_2_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_3_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_1_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_4_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_5_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_2_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_6_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_7_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_3_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_8_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_9_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_4_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_10_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_11_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_5_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_12_bits_mask = 8'h0; // 
@[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_13_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_6_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [7:0] _portsBIO_WIRE_14_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [7:0] _portsBIO_WIRE_15_bits_mask = 8'h0; // @[Bundles.scala:264:61] wire [7:0] portsBIO_filtered_7_0_bits_mask = 8'h0; // @[Xbar.scala:352:24] wire [11:0] _beatsBO_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _beatsCI_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _beatsBO_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [11:0] _beatsCI_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _beatsBO_decode_T = 27'hFFF; // @[package.scala:243:71] wire [26:0] _beatsCI_decode_T = 27'hFFF; // @[package.scala:243:71] wire [5:0] _beatsBO_decode_T_5 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_8 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_11 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_14 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_17 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_20 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_23 = 6'h0; // @[package.scala:243:46] wire [5:0] _beatsBO_decode_T_4 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_7 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_10 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_13 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_16 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_19 = 6'h3F; // @[package.scala:243:76] wire [5:0] _beatsBO_decode_T_22 = 6'h3F; // @[package.scala:243:76] wire [20:0] _beatsBO_decode_T_3 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_6 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_9 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_12 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_15 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_18 = 21'h3F; // @[package.scala:243:71] wire [20:0] _beatsBO_decode_T_21 = 21'h3F; // @[package.scala:243:71] wire [29:0] _requestCIO_T_1 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_2 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_3 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_6 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_7 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_8 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_11 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_12 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_13 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_16 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_17 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_18 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_21 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_22 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_23 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_26 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_27 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_28 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_31 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_32 = 
30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_33 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_36 = 30'h0; // @[Parameters.scala:137:41] wire [29:0] _requestCIO_T_37 = 30'h0; // @[Parameters.scala:137:46] wire [29:0] _requestCIO_T_38 = 30'h0; // @[Parameters.scala:137:46] wire anonIn_a_ready; // @[MixedNode.scala:551:17] wire anonIn_a_valid = auto_anon_in_a_valid_0; // @[Xbar.scala:74:9] wire [2:0] anonIn_a_bits_opcode = auto_anon_in_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] anonIn_a_bits_param = auto_anon_in_a_bits_param_0; // @[Xbar.scala:74:9] wire [3:0] anonIn_a_bits_size = auto_anon_in_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] anonIn_a_bits_source = auto_anon_in_a_bits_source_0; // @[Xbar.scala:74:9] wire [28:0] anonIn_a_bits_address = auto_anon_in_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] anonIn_a_bits_mask = auto_anon_in_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] anonIn_a_bits_data = auto_anon_in_a_bits_data_0; // @[Xbar.scala:74:9] wire anonIn_a_bits_corrupt = auto_anon_in_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire anonIn_d_ready = auto_anon_in_d_ready_0; // @[Xbar.scala:74:9] wire anonIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] anonIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] anonIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] anonIn_d_bits_size; // @[MixedNode.scala:551:17] wire [8:0] anonIn_d_bits_source; // @[MixedNode.scala:551:17] wire anonIn_d_bits_sink; // @[MixedNode.scala:551:17] wire anonIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] anonIn_d_bits_data; // @[MixedNode.scala:551:17] wire anonIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire x1_anonOut_6_a_ready = auto_anon_out_7_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_6_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_6_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_6_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_6_a_bits_size; // @[MixedNode.scala:542:17] wire [8:0] x1_anonOut_6_a_bits_source; // @[MixedNode.scala:542:17] wire [20:0] x1_anonOut_6_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_6_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_6_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_6_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_6_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_6_d_valid = auto_anon_out_7_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_6_d_bits_opcode = auto_anon_out_7_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] x1_anonOut_6_d_bits_param = auto_anon_out_7_d_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_6_d_bits_size = auto_anon_out_7_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] x1_anonOut_6_d_bits_source = auto_anon_out_7_d_bits_source_0; // @[Xbar.scala:74:9] wire x1_anonOut_6_d_bits_sink = auto_anon_out_7_d_bits_sink_0; // @[Xbar.scala:74:9] wire x1_anonOut_6_d_bits_denied = auto_anon_out_7_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_6_d_bits_data = auto_anon_out_7_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_6_d_bits_corrupt = auto_anon_out_7_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire x1_anonOut_5_a_ready = auto_anon_out_6_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_5_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_5_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_5_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_5_a_bits_size; // 
@[MixedNode.scala:542:17] wire [8:0] x1_anonOut_5_a_bits_source; // @[MixedNode.scala:542:17] wire [16:0] x1_anonOut_5_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_5_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_5_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_5_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_5_d_valid = auto_anon_out_6_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_5_d_bits_size = auto_anon_out_6_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] x1_anonOut_5_d_bits_source = auto_anon_out_6_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_5_d_bits_data = auto_anon_out_6_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_4_a_ready = auto_anon_out_5_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_4_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_4_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_4_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_4_a_bits_size; // @[MixedNode.scala:542:17] wire [8:0] x1_anonOut_4_a_bits_source; // @[MixedNode.scala:542:17] wire [11:0] x1_anonOut_4_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_4_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_4_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_4_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_4_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_4_d_valid = auto_anon_out_5_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_4_d_bits_opcode = auto_anon_out_5_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_4_d_bits_size = auto_anon_out_5_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] x1_anonOut_4_d_bits_source = auto_anon_out_5_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_4_d_bits_data = auto_anon_out_5_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_3_a_ready = auto_anon_out_4_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_3_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_3_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_3_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_3_a_bits_size; // @[MixedNode.scala:542:17] wire [8:0] x1_anonOut_3_a_bits_source; // @[MixedNode.scala:542:17] wire [27:0] x1_anonOut_3_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_3_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_3_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_3_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_3_d_valid = auto_anon_out_4_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_3_d_bits_opcode = auto_anon_out_4_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_3_d_bits_size = auto_anon_out_4_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] x1_anonOut_3_d_bits_source = auto_anon_out_4_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_3_d_bits_data = auto_anon_out_4_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_2_a_ready = auto_anon_out_3_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_2_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_2_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_2_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_2_a_bits_size; // @[MixedNode.scala:542:17] wire [8:0] x1_anonOut_2_a_bits_source; // @[MixedNode.scala:542:17] wire [25:0] 
x1_anonOut_2_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_2_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_2_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_2_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_2_d_valid = auto_anon_out_3_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_2_d_bits_opcode = auto_anon_out_3_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_2_d_bits_size = auto_anon_out_3_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] x1_anonOut_2_d_bits_source = auto_anon_out_3_d_bits_source_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_2_d_bits_data = auto_anon_out_3_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_1_a_ready = auto_anon_out_2_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_1_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_1_a_bits_size; // @[MixedNode.scala:542:17] wire [8:0] x1_anonOut_1_a_bits_source; // @[MixedNode.scala:542:17] wire [28:0] x1_anonOut_1_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_1_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_1_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_1_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_1_d_valid = auto_anon_out_2_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_1_d_bits_opcode = auto_anon_out_2_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] x1_anonOut_1_d_bits_param = auto_anon_out_2_d_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_1_d_bits_size = auto_anon_out_2_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] x1_anonOut_1_d_bits_source = auto_anon_out_2_d_bits_source_0; // @[Xbar.scala:74:9] wire x1_anonOut_1_d_bits_sink = auto_anon_out_2_d_bits_sink_0; // @[Xbar.scala:74:9] wire x1_anonOut_1_d_bits_denied = auto_anon_out_2_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_1_d_bits_data = auto_anon_out_2_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_1_d_bits_corrupt = auto_anon_out_2_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire x1_anonOut_a_ready = auto_anon_out_1_a_ready_0; // @[Xbar.scala:74:9] wire x1_anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] x1_anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire [8:0] x1_anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [25:0] x1_anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] x1_anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] x1_anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire x1_anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire x1_anonOut_d_ready; // @[MixedNode.scala:542:17] wire x1_anonOut_d_valid = auto_anon_out_1_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_d_bits_opcode = auto_anon_out_1_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] x1_anonOut_d_bits_param = auto_anon_out_1_d_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] x1_anonOut_d_bits_size = auto_anon_out_1_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] x1_anonOut_d_bits_source = auto_anon_out_1_d_bits_source_0; // @[Xbar.scala:74:9] wire x1_anonOut_d_bits_sink = auto_anon_out_1_d_bits_sink_0; // @[Xbar.scala:74:9] wire x1_anonOut_d_bits_denied = 
auto_anon_out_1_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] x1_anonOut_d_bits_data = auto_anon_out_1_d_bits_data_0; // @[Xbar.scala:74:9] wire x1_anonOut_d_bits_corrupt = auto_anon_out_1_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire anonOut_a_ready = auto_anon_out_0_a_ready_0; // @[Xbar.scala:74:9] wire anonOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] anonOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] anonOut_a_bits_param; // @[MixedNode.scala:542:17] wire [3:0] anonOut_a_bits_size; // @[MixedNode.scala:542:17] wire [8:0] anonOut_a_bits_source; // @[MixedNode.scala:542:17] wire [13:0] anonOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] anonOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] anonOut_a_bits_data; // @[MixedNode.scala:542:17] wire anonOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire anonOut_d_ready; // @[MixedNode.scala:542:17] wire anonOut_d_valid = auto_anon_out_0_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] anonOut_d_bits_opcode = auto_anon_out_0_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] anonOut_d_bits_param = auto_anon_out_0_d_bits_param_0; // @[Xbar.scala:74:9] wire [3:0] anonOut_d_bits_size = auto_anon_out_0_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] anonOut_d_bits_source = auto_anon_out_0_d_bits_source_0; // @[Xbar.scala:74:9] wire anonOut_d_bits_sink = auto_anon_out_0_d_bits_sink_0; // @[Xbar.scala:74:9] wire anonOut_d_bits_denied = auto_anon_out_0_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] anonOut_d_bits_data = auto_anon_out_0_d_bits_data_0; // @[Xbar.scala:74:9] wire anonOut_d_bits_corrupt = auto_anon_out_0_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_in_a_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_in_d_bits_opcode_0; // @[Xbar.scala:74:9] wire [1:0] auto_anon_in_d_bits_param_0; // @[Xbar.scala:74:9] wire [3:0] auto_anon_in_d_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_in_d_bits_source_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_bits_sink_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_bits_denied_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_in_d_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_in_d_valid_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_7_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_7_a_bits_source_0; // @[Xbar.scala:74:9] wire [20:0] auto_anon_out_7_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_7_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_7_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_7_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_7_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_7_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_6_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_6_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_6_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_6_a_bits_source_0; // @[Xbar.scala:74:9] wire [16:0] auto_anon_out_6_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_6_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_6_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_6_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_6_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_6_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_a_bits_opcode_0; // 
@[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_5_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_5_a_bits_source_0; // @[Xbar.scala:74:9] wire [11:0] auto_anon_out_5_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_5_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_5_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_5_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_5_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_5_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_4_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_4_a_bits_source_0; // @[Xbar.scala:74:9] wire [27:0] auto_anon_out_4_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_4_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_4_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_4_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_4_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_4_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_3_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_3_a_bits_source_0; // @[Xbar.scala:74:9] wire [25:0] auto_anon_out_3_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_3_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_3_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_3_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_3_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_3_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_2_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_2_a_bits_source_0; // @[Xbar.scala:74:9] wire [28:0] auto_anon_out_2_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_2_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_2_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_2_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_param_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_1_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_1_a_bits_source_0; // @[Xbar.scala:74:9] wire [25:0] auto_anon_out_1_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_1_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_1_a_bits_data_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_1_d_ready_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_a_bits_opcode_0; // @[Xbar.scala:74:9] wire [2:0] auto_anon_out_0_a_bits_param_0; // @[Xbar.scala:74:9] wire [3:0] auto_anon_out_0_a_bits_size_0; // @[Xbar.scala:74:9] wire [8:0] auto_anon_out_0_a_bits_source_0; // @[Xbar.scala:74:9] wire [13:0] auto_anon_out_0_a_bits_address_0; // @[Xbar.scala:74:9] wire [7:0] auto_anon_out_0_a_bits_mask_0; // @[Xbar.scala:74:9] wire [63:0] auto_anon_out_0_a_bits_data_0; // @[Xbar.scala:74:9] wire 
auto_anon_out_0_a_bits_corrupt_0; // @[Xbar.scala:74:9] wire auto_anon_out_0_a_valid_0; // @[Xbar.scala:74:9] wire auto_anon_out_0_d_ready_0; // @[Xbar.scala:74:9] wire in_0_a_ready; // @[Xbar.scala:159:18] assign auto_anon_in_a_ready_0 = anonIn_a_ready; // @[Xbar.scala:74:9] wire in_0_a_valid = anonIn_a_valid; // @[Xbar.scala:159:18] wire [2:0] in_0_a_bits_opcode = anonIn_a_bits_opcode; // @[Xbar.scala:159:18] wire [2:0] in_0_a_bits_param = anonIn_a_bits_param; // @[Xbar.scala:159:18] wire [3:0] in_0_a_bits_size = anonIn_a_bits_size; // @[Xbar.scala:159:18] wire [8:0] _in_0_a_bits_source_T = anonIn_a_bits_source; // @[Xbar.scala:166:55] wire [28:0] in_0_a_bits_address = anonIn_a_bits_address; // @[Xbar.scala:159:18] wire [7:0] in_0_a_bits_mask = anonIn_a_bits_mask; // @[Xbar.scala:159:18] wire [63:0] in_0_a_bits_data = anonIn_a_bits_data; // @[Xbar.scala:159:18] wire in_0_a_bits_corrupt = anonIn_a_bits_corrupt; // @[Xbar.scala:159:18] wire in_0_d_ready = anonIn_d_ready; // @[Xbar.scala:159:18] wire in_0_d_valid; // @[Xbar.scala:159:18] assign auto_anon_in_d_valid_0 = anonIn_d_valid; // @[Xbar.scala:74:9] wire [2:0] in_0_d_bits_opcode; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_opcode_0 = anonIn_d_bits_opcode; // @[Xbar.scala:74:9] wire [1:0] in_0_d_bits_param; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_param_0 = anonIn_d_bits_param; // @[Xbar.scala:74:9] wire [3:0] in_0_d_bits_size; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_size_0 = anonIn_d_bits_size; // @[Xbar.scala:74:9] wire [8:0] _anonIn_d_bits_source_T; // @[Xbar.scala:156:69] assign auto_anon_in_d_bits_source_0 = anonIn_d_bits_source; // @[Xbar.scala:74:9] wire in_0_d_bits_sink; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_sink_0 = anonIn_d_bits_sink; // @[Xbar.scala:74:9] wire in_0_d_bits_denied; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_denied_0 = anonIn_d_bits_denied; // @[Xbar.scala:74:9] wire [63:0] in_0_d_bits_data; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_data_0 = anonIn_d_bits_data; // @[Xbar.scala:74:9] wire in_0_d_bits_corrupt; // @[Xbar.scala:159:18] assign auto_anon_in_d_bits_corrupt_0 = anonIn_d_bits_corrupt; // @[Xbar.scala:74:9] wire out_0_a_ready = anonOut_a_ready; // @[Xbar.scala:216:19] wire out_0_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_valid_0 = anonOut_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_0_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_opcode_0 = anonOut_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_0_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_param_0 = anonOut_a_bits_param; // @[Xbar.scala:74:9] wire [3:0] out_0_a_bits_size; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_size_0 = anonOut_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] out_0_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_source_0 = anonOut_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_0_a_bits_address_0 = anonOut_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_0_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_mask_0 = anonOut_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_0_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_data_0 = anonOut_a_bits_data; // @[Xbar.scala:74:9] wire out_0_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_0_a_bits_corrupt_0 = anonOut_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_0_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_0_d_ready_0 = anonOut_d_ready; // @[Xbar.scala:74:9] wire 
out_0_d_valid = anonOut_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_0_d_bits_opcode = anonOut_d_bits_opcode; // @[Xbar.scala:216:19] wire [1:0] out_0_d_bits_param = anonOut_d_bits_param; // @[Xbar.scala:216:19] wire [3:0] out_0_d_bits_size = anonOut_d_bits_size; // @[Xbar.scala:216:19] wire [8:0] out_0_d_bits_source = anonOut_d_bits_source; // @[Xbar.scala:216:19] wire _out_0_d_bits_sink_T = anonOut_d_bits_sink; // @[Xbar.scala:251:53] wire out_0_d_bits_denied = anonOut_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] out_0_d_bits_data = anonOut_d_bits_data; // @[Xbar.scala:216:19] wire out_0_d_bits_corrupt = anonOut_d_bits_corrupt; // @[Xbar.scala:216:19] wire out_1_a_ready = x1_anonOut_a_ready; // @[Xbar.scala:216:19] wire out_1_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_valid_0 = x1_anonOut_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_1_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_opcode_0 = x1_anonOut_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_1_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_param_0 = x1_anonOut_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_1_a_bits_size_0 = x1_anonOut_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] out_1_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_source_0 = x1_anonOut_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_1_a_bits_address_0 = x1_anonOut_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_1_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_mask_0 = x1_anonOut_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_1_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_data_0 = x1_anonOut_a_bits_data; // @[Xbar.scala:74:9] wire out_1_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_1_a_bits_corrupt_0 = x1_anonOut_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_1_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_1_d_ready_0 = x1_anonOut_d_ready; // @[Xbar.scala:74:9] wire out_1_d_valid = x1_anonOut_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_1_d_bits_opcode = x1_anonOut_d_bits_opcode; // @[Xbar.scala:216:19] wire [1:0] out_1_d_bits_param = x1_anonOut_d_bits_param; // @[Xbar.scala:216:19] wire [8:0] out_1_d_bits_source = x1_anonOut_d_bits_source; // @[Xbar.scala:216:19] wire _out_1_d_bits_sink_T = x1_anonOut_d_bits_sink; // @[Xbar.scala:251:53] wire out_1_d_bits_denied = x1_anonOut_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] out_1_d_bits_data = x1_anonOut_d_bits_data; // @[Xbar.scala:216:19] wire out_1_d_bits_corrupt = x1_anonOut_d_bits_corrupt; // @[Xbar.scala:216:19] wire out_2_a_ready = x1_anonOut_1_a_ready; // @[Xbar.scala:216:19] wire out_2_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_valid_0 = x1_anonOut_1_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_2_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_opcode_0 = x1_anonOut_1_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_2_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_param_0 = x1_anonOut_1_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_2_a_bits_size_0 = x1_anonOut_1_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] out_2_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_source_0 = x1_anonOut_1_a_bits_source; // @[Xbar.scala:74:9] wire [28:0] out_2_a_bits_address; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_address_0 = x1_anonOut_1_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_2_a_bits_mask; // @[Xbar.scala:216:19] 
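// The wiring below repeats the same pattern for each remaining output port: every out_N request
// bundle (Xbar.scala:216) drives its auto_anon_out_N boundary signals on the A channel, while the
// matching D-channel response fields are captured as portsDIO_filtered_N inputs for the
// return-path arbiter further down.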
assign auto_anon_out_2_a_bits_mask_0 = x1_anonOut_1_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_2_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_data_0 = x1_anonOut_1_a_bits_data; // @[Xbar.scala:74:9] wire out_2_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_2_a_bits_corrupt_0 = x1_anonOut_1_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_2_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_2_d_ready_0 = x1_anonOut_1_d_ready; // @[Xbar.scala:74:9] wire out_2_d_valid = x1_anonOut_1_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_2_d_bits_opcode = x1_anonOut_1_d_bits_opcode; // @[Xbar.scala:216:19] wire [1:0] out_2_d_bits_param = x1_anonOut_1_d_bits_param; // @[Xbar.scala:216:19] wire [8:0] out_2_d_bits_source = x1_anonOut_1_d_bits_source; // @[Xbar.scala:216:19] wire _out_2_d_bits_sink_T = x1_anonOut_1_d_bits_sink; // @[Xbar.scala:251:53] wire out_2_d_bits_denied = x1_anonOut_1_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] out_2_d_bits_data = x1_anonOut_1_d_bits_data; // @[Xbar.scala:216:19] wire out_2_d_bits_corrupt = x1_anonOut_1_d_bits_corrupt; // @[Xbar.scala:216:19] wire out_3_a_ready = x1_anonOut_2_a_ready; // @[Xbar.scala:216:19] wire out_3_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_valid_0 = x1_anonOut_2_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_3_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_opcode_0 = x1_anonOut_2_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_3_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_param_0 = x1_anonOut_2_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_3_a_bits_size_0 = x1_anonOut_2_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] out_3_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_source_0 = x1_anonOut_2_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_3_a_bits_address_0 = x1_anonOut_2_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_3_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_mask_0 = x1_anonOut_2_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_3_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_data_0 = x1_anonOut_2_a_bits_data; // @[Xbar.scala:74:9] wire out_3_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_3_a_bits_corrupt_0 = x1_anonOut_2_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_3_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_3_d_ready_0 = x1_anonOut_2_d_ready; // @[Xbar.scala:74:9] wire out_3_d_valid = x1_anonOut_2_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_3_d_bits_opcode = x1_anonOut_2_d_bits_opcode; // @[Xbar.scala:216:19] wire [8:0] out_3_d_bits_source = x1_anonOut_2_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_3_d_bits_data = x1_anonOut_2_d_bits_data; // @[Xbar.scala:216:19] wire out_4_a_ready = x1_anonOut_3_a_ready; // @[Xbar.scala:216:19] wire out_4_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_valid_0 = x1_anonOut_3_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_4_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_opcode_0 = x1_anonOut_3_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_4_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_param_0 = x1_anonOut_3_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_4_a_bits_size_0 = x1_anonOut_3_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] out_4_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_source_0 = x1_anonOut_3_a_bits_source; // @[Xbar.scala:74:9] assign 
auto_anon_out_4_a_bits_address_0 = x1_anonOut_3_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_4_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_mask_0 = x1_anonOut_3_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_4_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_data_0 = x1_anonOut_3_a_bits_data; // @[Xbar.scala:74:9] wire out_4_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_4_a_bits_corrupt_0 = x1_anonOut_3_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_4_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_4_d_ready_0 = x1_anonOut_3_d_ready; // @[Xbar.scala:74:9] wire out_4_d_valid = x1_anonOut_3_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_4_d_bits_opcode = x1_anonOut_3_d_bits_opcode; // @[Xbar.scala:216:19] wire [8:0] out_4_d_bits_source = x1_anonOut_3_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_4_d_bits_data = x1_anonOut_3_d_bits_data; // @[Xbar.scala:216:19] wire out_5_a_ready = x1_anonOut_4_a_ready; // @[Xbar.scala:216:19] wire out_5_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_valid_0 = x1_anonOut_4_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_5_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_opcode_0 = x1_anonOut_4_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_5_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_param_0 = x1_anonOut_4_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_5_a_bits_size_0 = x1_anonOut_4_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] out_5_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_source_0 = x1_anonOut_4_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_5_a_bits_address_0 = x1_anonOut_4_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_5_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_mask_0 = x1_anonOut_4_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_5_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_data_0 = x1_anonOut_4_a_bits_data; // @[Xbar.scala:74:9] wire out_5_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_5_a_bits_corrupt_0 = x1_anonOut_4_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_5_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_5_d_ready_0 = x1_anonOut_4_d_ready; // @[Xbar.scala:74:9] wire out_5_d_valid = x1_anonOut_4_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_5_d_bits_opcode = x1_anonOut_4_d_bits_opcode; // @[Xbar.scala:216:19] wire [8:0] out_5_d_bits_source = x1_anonOut_4_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_5_d_bits_data = x1_anonOut_4_d_bits_data; // @[Xbar.scala:216:19] wire out_6_a_ready = x1_anonOut_5_a_ready; // @[Xbar.scala:216:19] wire out_6_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_valid_0 = x1_anonOut_5_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_6_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_opcode_0 = x1_anonOut_5_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_6_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_param_0 = x1_anonOut_5_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_6_a_bits_size_0 = x1_anonOut_5_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] out_6_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_source_0 = x1_anonOut_5_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_6_a_bits_address_0 = x1_anonOut_5_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_6_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_mask_0 = 
x1_anonOut_5_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_6_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_data_0 = x1_anonOut_5_a_bits_data; // @[Xbar.scala:74:9] wire out_6_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_6_a_bits_corrupt_0 = x1_anonOut_5_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_6_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_6_d_ready_0 = x1_anonOut_5_d_ready; // @[Xbar.scala:74:9] wire out_6_d_valid = x1_anonOut_5_d_valid; // @[Xbar.scala:216:19] wire [8:0] out_6_d_bits_source = x1_anonOut_5_d_bits_source; // @[Xbar.scala:216:19] wire [63:0] out_6_d_bits_data = x1_anonOut_5_d_bits_data; // @[Xbar.scala:216:19] wire out_7_a_ready = x1_anonOut_6_a_ready; // @[Xbar.scala:216:19] wire out_7_a_valid; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_valid_0 = x1_anonOut_6_a_valid; // @[Xbar.scala:74:9] wire [2:0] out_7_a_bits_opcode; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_opcode_0 = x1_anonOut_6_a_bits_opcode; // @[Xbar.scala:74:9] wire [2:0] out_7_a_bits_param; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_param_0 = x1_anonOut_6_a_bits_param; // @[Xbar.scala:74:9] assign auto_anon_out_7_a_bits_size_0 = x1_anonOut_6_a_bits_size; // @[Xbar.scala:74:9] wire [8:0] out_7_a_bits_source; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_source_0 = x1_anonOut_6_a_bits_source; // @[Xbar.scala:74:9] assign auto_anon_out_7_a_bits_address_0 = x1_anonOut_6_a_bits_address; // @[Xbar.scala:74:9] wire [7:0] out_7_a_bits_mask; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_mask_0 = x1_anonOut_6_a_bits_mask; // @[Xbar.scala:74:9] wire [63:0] out_7_a_bits_data; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_data_0 = x1_anonOut_6_a_bits_data; // @[Xbar.scala:74:9] wire out_7_a_bits_corrupt; // @[Xbar.scala:216:19] assign auto_anon_out_7_a_bits_corrupt_0 = x1_anonOut_6_a_bits_corrupt; // @[Xbar.scala:74:9] wire out_7_d_ready; // @[Xbar.scala:216:19] assign auto_anon_out_7_d_ready_0 = x1_anonOut_6_d_ready; // @[Xbar.scala:74:9] wire out_7_d_valid = x1_anonOut_6_d_valid; // @[Xbar.scala:216:19] wire [2:0] out_7_d_bits_opcode = x1_anonOut_6_d_bits_opcode; // @[Xbar.scala:216:19] wire [1:0] out_7_d_bits_param = x1_anonOut_6_d_bits_param; // @[Xbar.scala:216:19] wire [8:0] out_7_d_bits_source = x1_anonOut_6_d_bits_source; // @[Xbar.scala:216:19] wire _out_7_d_bits_sink_T = x1_anonOut_6_d_bits_sink; // @[Xbar.scala:251:53] wire out_7_d_bits_denied = x1_anonOut_6_d_bits_denied; // @[Xbar.scala:216:19] wire [63:0] out_7_d_bits_data = x1_anonOut_6_d_bits_data; // @[Xbar.scala:216:19] wire out_7_d_bits_corrupt = x1_anonOut_6_d_bits_corrupt; // @[Xbar.scala:216:19] wire _portsAOI_in_0_a_ready_WIRE; // @[Mux.scala:30:73] assign anonIn_a_ready = in_0_a_ready; // @[Xbar.scala:159:18] wire [2:0] portsAOI_filtered_0_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_1_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_2_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_3_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_4_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_5_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_6_bits_opcode = in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_7_bits_opcode = 
in_0_a_bits_opcode; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_0_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_1_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_2_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_3_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_4_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_5_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_6_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [2:0] portsAOI_filtered_7_bits_param = in_0_a_bits_param; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_0_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_1_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_2_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_3_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_4_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_5_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_6_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [3:0] portsAOI_filtered_7_bits_size = in_0_a_bits_size; // @[Xbar.scala:159:18, :352:24] wire [8:0] portsAOI_filtered_0_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [8:0] portsAOI_filtered_1_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [8:0] portsAOI_filtered_2_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [8:0] portsAOI_filtered_3_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [8:0] portsAOI_filtered_4_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [8:0] portsAOI_filtered_5_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [8:0] portsAOI_filtered_6_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [8:0] portsAOI_filtered_7_bits_source = in_0_a_bits_source; // @[Xbar.scala:159:18, :352:24] wire [28:0] _requestAIO_T_31 = in_0_a_bits_address; // @[Xbar.scala:159:18] wire [28:0] portsAOI_filtered_0_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_1_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_2_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_3_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_4_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_5_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_6_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [28:0] portsAOI_filtered_7_bits_address = in_0_a_bits_address; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_0_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_1_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_2_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] 
portsAOI_filtered_3_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_4_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_5_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_6_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [7:0] portsAOI_filtered_7_bits_mask = in_0_a_bits_mask; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_0_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_1_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_2_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_3_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_4_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_5_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_6_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire [63:0] portsAOI_filtered_7_bits_data = in_0_a_bits_data; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_0_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_1_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_2_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_3_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_4_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_5_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_6_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire portsAOI_filtered_7_bits_corrupt = in_0_a_bits_corrupt; // @[Xbar.scala:159:18, :352:24] wire _in_0_d_valid_T_22; // @[Arbiter.scala:96:24] assign anonIn_d_valid = in_0_d_valid; // @[Xbar.scala:159:18] wire [2:0] _in_0_d_bits_WIRE_opcode; // @[Mux.scala:30:73] assign anonIn_d_bits_opcode = in_0_d_bits_opcode; // @[Xbar.scala:159:18] wire [1:0] _in_0_d_bits_WIRE_param; // @[Mux.scala:30:73] assign anonIn_d_bits_param = in_0_d_bits_param; // @[Xbar.scala:159:18] wire [3:0] _in_0_d_bits_WIRE_size; // @[Mux.scala:30:73] assign anonIn_d_bits_size = in_0_d_bits_size; // @[Xbar.scala:159:18] wire [8:0] _in_0_d_bits_WIRE_source; // @[Mux.scala:30:73] assign _anonIn_d_bits_source_T = in_0_d_bits_source; // @[Xbar.scala:156:69, :159:18] wire _in_0_d_bits_WIRE_sink; // @[Mux.scala:30:73] assign anonIn_d_bits_sink = in_0_d_bits_sink; // @[Xbar.scala:159:18] wire _in_0_d_bits_WIRE_denied; // @[Mux.scala:30:73] assign anonIn_d_bits_denied = in_0_d_bits_denied; // @[Xbar.scala:159:18] wire [63:0] _in_0_d_bits_WIRE_data; // @[Mux.scala:30:73] assign anonIn_d_bits_data = in_0_d_bits_data; // @[Xbar.scala:159:18] wire _in_0_d_bits_WIRE_corrupt; // @[Mux.scala:30:73] assign anonIn_d_bits_corrupt = in_0_d_bits_corrupt; // @[Xbar.scala:159:18] assign in_0_a_bits_source = _in_0_a_bits_source_T; // @[Xbar.scala:159:18, :166:55] assign anonIn_d_bits_source = _anonIn_d_bits_source_T; // @[Xbar.scala:156:69] wire portsAOI_filtered_0_ready = out_0_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_0_valid; // @[Xbar.scala:352:24] assign anonOut_a_valid = out_0_a_valid; // @[Xbar.scala:216:19] assign anonOut_a_bits_opcode = out_0_a_bits_opcode; // 
@[Xbar.scala:216:19] assign anonOut_a_bits_param = out_0_a_bits_param; // @[Xbar.scala:216:19] assign anonOut_a_bits_size = out_0_a_bits_size; // @[Xbar.scala:216:19] assign anonOut_a_bits_source = out_0_a_bits_source; // @[Xbar.scala:216:19] assign anonOut_a_bits_mask = out_0_a_bits_mask; // @[Xbar.scala:216:19] assign anonOut_a_bits_data = out_0_a_bits_data; // @[Xbar.scala:216:19] assign anonOut_a_bits_corrupt = out_0_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_0_ready; // @[Xbar.scala:352:24] assign anonOut_d_ready = out_0_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_1 = out_0_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_0_bits_opcode = out_0_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [1:0] portsDIO_filtered_0_bits_param = out_0_d_bits_param; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_0_bits_size = out_0_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [8:0] _requestDOI_uncommonBits_T = out_0_d_bits_source; // @[Xbar.scala:216:19] wire [8:0] portsDIO_filtered_0_bits_source = out_0_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_bits_sink = out_0_d_bits_sink; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_bits_denied = out_0_d_bits_denied; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_0_bits_data = out_0_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_bits_corrupt = out_0_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_1_ready = out_1_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_1_valid; // @[Xbar.scala:352:24] assign x1_anonOut_a_valid = out_1_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_opcode = out_1_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_param = out_1_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_source = out_1_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_mask = out_1_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_data = out_1_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_a_bits_corrupt = out_1_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_1_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_d_ready = out_1_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_3 = out_1_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_1_0_bits_opcode = out_1_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [1:0] portsDIO_filtered_1_0_bits_param = out_1_d_bits_param; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_1_0_bits_size = out_1_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [8:0] _requestDOI_uncommonBits_T_1 = out_1_d_bits_source; // @[Xbar.scala:216:19] wire [8:0] portsDIO_filtered_1_0_bits_source = out_1_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_bits_sink = out_1_d_bits_sink; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_bits_denied = out_1_d_bits_denied; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_1_0_bits_data = out_1_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_bits_corrupt = out_1_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_2_ready = out_2_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_2_valid; // @[Xbar.scala:352:24] assign x1_anonOut_1_a_valid = out_2_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_opcode = out_2_a_bits_opcode; // @[Xbar.scala:216:19] 
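// A-channel fan-out: the single inner request (in_0_a) is replicated into portsAOI_filtered_0..7
// (Xbar.scala:352). Each copy carries identical payload bits, and only the copy whose address
// decode (requestAIO_0_N) matches asserts valid, so, assuming the manager address ranges are
// disjoint, at most one output port sees any given request.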
assign x1_anonOut_1_a_bits_param = out_2_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_source = out_2_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_address = out_2_a_bits_address; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_mask = out_2_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_data = out_2_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_1_a_bits_corrupt = out_2_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_2_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_1_d_ready = out_2_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_5 = out_2_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_2_0_bits_opcode = out_2_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [1:0] portsDIO_filtered_2_0_bits_param = out_2_d_bits_param; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_2_0_bits_size = out_2_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [8:0] _requestDOI_uncommonBits_T_2 = out_2_d_bits_source; // @[Xbar.scala:216:19] wire [8:0] portsDIO_filtered_2_0_bits_source = out_2_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_2_0_bits_sink = out_2_d_bits_sink; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_2_0_bits_denied = out_2_d_bits_denied; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_2_0_bits_data = out_2_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_2_0_bits_corrupt = out_2_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_3_ready = out_3_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_3_valid; // @[Xbar.scala:352:24] assign x1_anonOut_2_a_valid = out_3_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_opcode = out_3_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_param = out_3_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_source = out_3_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_mask = out_3_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_data = out_3_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_2_a_bits_corrupt = out_3_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_3_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_2_d_ready = out_3_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_7 = out_3_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_3_0_bits_opcode = out_3_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_3_0_bits_size = out_3_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [8:0] _requestDOI_uncommonBits_T_3 = out_3_d_bits_source; // @[Xbar.scala:216:19] wire [8:0] portsDIO_filtered_3_0_bits_source = out_3_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_3_0_bits_data = out_3_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_4_ready = out_4_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_4_valid; // @[Xbar.scala:352:24] assign x1_anonOut_3_a_valid = out_4_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_opcode = out_4_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_param = out_4_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_source = out_4_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_mask = out_4_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_3_a_bits_data = out_4_a_bits_data; // @[Xbar.scala:216:19] assign 
x1_anonOut_3_a_bits_corrupt = out_4_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_4_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_3_d_ready = out_4_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_9 = out_4_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_4_0_bits_opcode = out_4_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_4_0_bits_size = out_4_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [8:0] _requestDOI_uncommonBits_T_4 = out_4_d_bits_source; // @[Xbar.scala:216:19] wire [8:0] portsDIO_filtered_4_0_bits_source = out_4_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_4_0_bits_data = out_4_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_5_ready = out_5_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_5_valid; // @[Xbar.scala:352:24] assign x1_anonOut_4_a_valid = out_5_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_opcode = out_5_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_param = out_5_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_source = out_5_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_mask = out_5_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_data = out_5_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_4_a_bits_corrupt = out_5_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_5_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_4_d_ready = out_5_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_11 = out_5_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_5_0_bits_opcode = out_5_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_5_0_bits_size = out_5_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [8:0] _requestDOI_uncommonBits_T_5 = out_5_d_bits_source; // @[Xbar.scala:216:19] wire [8:0] portsDIO_filtered_5_0_bits_source = out_5_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_5_0_bits_data = out_5_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_6_ready = out_6_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_6_valid; // @[Xbar.scala:352:24] assign x1_anonOut_5_a_valid = out_6_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_opcode = out_6_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_param = out_6_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_source = out_6_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_mask = out_6_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_data = out_6_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_5_a_bits_corrupt = out_6_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_6_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_5_d_ready = out_6_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_13 = out_6_d_valid; // @[Xbar.scala:216:19, :355:40] wire [3:0] portsDIO_filtered_6_0_bits_size = out_6_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [8:0] _requestDOI_uncommonBits_T_6 = out_6_d_bits_source; // @[Xbar.scala:216:19] wire [8:0] portsDIO_filtered_6_0_bits_source = out_6_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_6_0_bits_data = out_6_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_7_ready = out_7_a_ready; // @[Xbar.scala:216:19, :352:24] wire portsAOI_filtered_7_valid; // 
@[Xbar.scala:352:24] assign x1_anonOut_6_a_valid = out_7_a_valid; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_opcode = out_7_a_bits_opcode; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_param = out_7_a_bits_param; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_source = out_7_a_bits_source; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_mask = out_7_a_bits_mask; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_data = out_7_a_bits_data; // @[Xbar.scala:216:19] assign x1_anonOut_6_a_bits_corrupt = out_7_a_bits_corrupt; // @[Xbar.scala:216:19] wire portsDIO_filtered_7_0_ready; // @[Xbar.scala:352:24] assign x1_anonOut_6_d_ready = out_7_d_ready; // @[Xbar.scala:216:19] wire _portsDIO_filtered_0_valid_T_15 = out_7_d_valid; // @[Xbar.scala:216:19, :355:40] wire [2:0] portsDIO_filtered_7_0_bits_opcode = out_7_d_bits_opcode; // @[Xbar.scala:216:19, :352:24] wire [1:0] portsDIO_filtered_7_0_bits_param = out_7_d_bits_param; // @[Xbar.scala:216:19, :352:24] wire [3:0] portsDIO_filtered_7_0_bits_size = out_7_d_bits_size; // @[Xbar.scala:216:19, :352:24] wire [8:0] _requestDOI_uncommonBits_T_7 = out_7_d_bits_source; // @[Xbar.scala:216:19] wire [8:0] portsDIO_filtered_7_0_bits_source = out_7_d_bits_source; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_7_0_bits_sink = out_7_d_bits_sink; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_7_0_bits_denied = out_7_d_bits_denied; // @[Xbar.scala:216:19, :352:24] wire [63:0] portsDIO_filtered_7_0_bits_data = out_7_d_bits_data; // @[Xbar.scala:216:19, :352:24] wire [28:0] out_0_a_bits_address; // @[Xbar.scala:216:19] wire portsDIO_filtered_7_0_bits_corrupt = out_7_d_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire [3:0] out_1_a_bits_size; // @[Xbar.scala:216:19] wire [28:0] out_1_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_2_a_bits_size; // @[Xbar.scala:216:19] wire [3:0] out_3_a_bits_size; // @[Xbar.scala:216:19] wire [28:0] out_3_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_4_a_bits_size; // @[Xbar.scala:216:19] wire [28:0] out_4_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_5_a_bits_size; // @[Xbar.scala:216:19] wire [28:0] out_5_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_6_a_bits_size; // @[Xbar.scala:216:19] wire [28:0] out_6_a_bits_address; // @[Xbar.scala:216:19] wire [3:0] out_7_a_bits_size; // @[Xbar.scala:216:19] wire [28:0] out_7_a_bits_address; // @[Xbar.scala:216:19] assign anonOut_a_bits_address = out_0_a_bits_address[13:0]; // @[Xbar.scala:216:19, :222:41] assign out_0_d_bits_sink = _out_0_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] assign x1_anonOut_a_bits_address = out_1_a_bits_address[25:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_a_bits_size = out_1_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_1_d_bits_size = {1'h0, x1_anonOut_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign out_1_d_bits_sink = _out_1_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] assign x1_anonOut_1_a_bits_size = out_2_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_2_d_bits_size = {1'h0, x1_anonOut_1_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign out_2_d_bits_sink = _out_2_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] assign x1_anonOut_2_a_bits_address = out_3_a_bits_address[25:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_2_a_bits_size = out_3_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_3_d_bits_size = {1'h0, x1_anonOut_2_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign x1_anonOut_3_a_bits_address = 
out_4_a_bits_address[27:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_3_a_bits_size = out_4_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_4_d_bits_size = {1'h0, x1_anonOut_3_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign x1_anonOut_4_a_bits_address = out_5_a_bits_address[11:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_4_a_bits_size = out_5_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_5_d_bits_size = {1'h0, x1_anonOut_4_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign x1_anonOut_5_a_bits_address = out_6_a_bits_address[16:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_5_a_bits_size = out_6_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_6_d_bits_size = {1'h0, x1_anonOut_5_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign x1_anonOut_6_a_bits_address = out_7_a_bits_address[20:0]; // @[Xbar.scala:216:19, :222:41] assign x1_anonOut_6_a_bits_size = out_7_a_bits_size[2:0]; // @[Xbar.scala:216:19, :222:41] assign out_7_d_bits_size = {1'h0, x1_anonOut_6_d_bits_size}; // @[Xbar.scala:216:19, :250:29] assign out_7_d_bits_sink = _out_7_d_bits_sink_T; // @[Xbar.scala:216:19, :251:53] wire [28:0] _requestAIO_T = {in_0_a_bits_address[28:14], in_0_a_bits_address[13:0] ^ 14'h3000}; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_1 = {1'h0, _requestAIO_T}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_2 = _requestAIO_T_1 & 30'h1A113000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_3 = _requestAIO_T_2; // @[Parameters.scala:137:46] wire _requestAIO_T_4 = _requestAIO_T_3 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_0 = _requestAIO_T_4; // @[Xbar.scala:307:107] wire _portsAOI_filtered_0_valid_T = requestAIO_0_0; // @[Xbar.scala:307:107, :355:54] wire [28:0] _requestAIO_T_5 = {in_0_a_bits_address[28:26], in_0_a_bits_address[25:0] ^ 26'h2010000}; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_6 = {1'h0, _requestAIO_T_5}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_7 = _requestAIO_T_6 & 30'h1A113000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_8 = _requestAIO_T_7; // @[Parameters.scala:137:46] wire _requestAIO_T_9 = _requestAIO_T_8 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_1 = _requestAIO_T_9; // @[Xbar.scala:307:107] wire _portsAOI_filtered_1_valid_T = requestAIO_0_1; // @[Xbar.scala:307:107, :355:54] wire [28:0] _requestAIO_T_10 = {in_0_a_bits_address[28:13], in_0_a_bits_address[12:0] ^ 13'h1000}; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_11 = {1'h0, _requestAIO_T_10}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_12 = _requestAIO_T_11 & 30'h1A113000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_13 = _requestAIO_T_12; // @[Parameters.scala:137:46] wire _requestAIO_T_14 = _requestAIO_T_13 == 30'h0; // @[Parameters.scala:137:{46,59}] wire [28:0] _requestAIO_T_15 = in_0_a_bits_address ^ 29'h10000000; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_16 = {1'h0, _requestAIO_T_15}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_17 = _requestAIO_T_16 & 30'h1A113000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_18 = _requestAIO_T_17; // @[Parameters.scala:137:46] wire _requestAIO_T_19 = _requestAIO_T_18 == 30'h0; // @[Parameters.scala:137:{46,59}] wire _requestAIO_T_20 = _requestAIO_T_14 | _requestAIO_T_19; // @[Xbar.scala:291:92] wire requestAIO_0_2 = _requestAIO_T_20; // @[Xbar.scala:291:92, :307:107] wire _portsAOI_filtered_2_valid_T = requestAIO_0_2; 
// @[Xbar.scala:307:107, :355:54] wire [28:0] _requestAIO_T_21 = {in_0_a_bits_address[28:26], in_0_a_bits_address[25:0] ^ 26'h2000000}; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_22 = {1'h0, _requestAIO_T_21}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_23 = _requestAIO_T_22 & 30'h1A110000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_24 = _requestAIO_T_23; // @[Parameters.scala:137:46] wire _requestAIO_T_25 = _requestAIO_T_24 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_3 = _requestAIO_T_25; // @[Xbar.scala:307:107] wire _portsAOI_filtered_3_valid_T = requestAIO_0_3; // @[Xbar.scala:307:107, :355:54] wire [28:0] _requestAIO_T_26 = {in_0_a_bits_address[28], in_0_a_bits_address[27:0] ^ 28'h8000000}; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_27 = {1'h0, _requestAIO_T_26}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_28 = _requestAIO_T_27 & 30'h18000000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_29 = _requestAIO_T_28; // @[Parameters.scala:137:46] wire _requestAIO_T_30 = _requestAIO_T_29 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_4 = _requestAIO_T_30; // @[Xbar.scala:307:107] wire _portsAOI_filtered_4_valid_T = requestAIO_0_4; // @[Xbar.scala:307:107, :355:54] wire [29:0] _requestAIO_T_32 = {1'h0, _requestAIO_T_31}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_33 = _requestAIO_T_32 & 30'h1A113000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_34 = _requestAIO_T_33; // @[Parameters.scala:137:46] wire _requestAIO_T_35 = _requestAIO_T_34 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_5 = _requestAIO_T_35; // @[Xbar.scala:307:107] wire _portsAOI_filtered_5_valid_T = requestAIO_0_5; // @[Xbar.scala:307:107, :355:54] wire [28:0] _requestAIO_T_36 = {in_0_a_bits_address[28:17], in_0_a_bits_address[16:0] ^ 17'h10000}; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_37 = {1'h0, _requestAIO_T_36}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_38 = _requestAIO_T_37 & 30'h1A110000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_39 = _requestAIO_T_38; // @[Parameters.scala:137:46] wire _requestAIO_T_40 = _requestAIO_T_39 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_6 = _requestAIO_T_40; // @[Xbar.scala:307:107] wire _portsAOI_filtered_6_valid_T = requestAIO_0_6; // @[Xbar.scala:307:107, :355:54] wire [28:0] _requestAIO_T_41 = {in_0_a_bits_address[28:21], in_0_a_bits_address[20:0] ^ 21'h100000}; // @[Xbar.scala:159:18] wire [29:0] _requestAIO_T_42 = {1'h0, _requestAIO_T_41}; // @[Parameters.scala:137:{31,41}] wire [29:0] _requestAIO_T_43 = _requestAIO_T_42 & 30'h1A103000; // @[Parameters.scala:137:{41,46}] wire [29:0] _requestAIO_T_44 = _requestAIO_T_43; // @[Parameters.scala:137:46] wire _requestAIO_T_45 = _requestAIO_T_44 == 30'h0; // @[Parameters.scala:137:{46,59}] wire requestAIO_0_7 = _requestAIO_T_45; // @[Xbar.scala:307:107] wire _portsAOI_filtered_7_valid_T = requestAIO_0_7; // @[Xbar.scala:307:107, :355:54] wire [8:0] requestDOI_uncommonBits = _requestDOI_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [8:0] requestDOI_uncommonBits_1 = _requestDOI_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [8:0] requestDOI_uncommonBits_2 = _requestDOI_uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [8:0] requestDOI_uncommonBits_3 = _requestDOI_uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [8:0] requestDOI_uncommonBits_4 = _requestDOI_uncommonBits_T_4; // 
@[Parameters.scala:52:{29,56}] wire [8:0] requestDOI_uncommonBits_5 = _requestDOI_uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [8:0] requestDOI_uncommonBits_6 = _requestDOI_uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [8:0] requestDOI_uncommonBits_7 = _requestDOI_uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [26:0] _beatsAI_decode_T = 27'hFFF << in_0_a_bits_size; // @[package.scala:243:71] wire [11:0] _beatsAI_decode_T_1 = _beatsAI_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsAI_decode_T_2 = ~_beatsAI_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] beatsAI_decode = _beatsAI_decode_T_2[11:3]; // @[package.scala:243:46] wire _beatsAI_opdata_T = in_0_a_bits_opcode[2]; // @[Xbar.scala:159:18] wire beatsAI_opdata = ~_beatsAI_opdata_T; // @[Edges.scala:92:{28,37}] wire [8:0] beatsAI_0 = beatsAI_opdata ? beatsAI_decode : 9'h0; // @[Edges.scala:92:28, :220:59, :221:14] wire [26:0] _beatsDO_decode_T = 27'hFFF << out_0_d_bits_size; // @[package.scala:243:71] wire [11:0] _beatsDO_decode_T_1 = _beatsDO_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _beatsDO_decode_T_2 = ~_beatsDO_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] beatsDO_decode = _beatsDO_decode_T_2[11:3]; // @[package.scala:243:46] wire beatsDO_opdata = out_0_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [8:0] beatsDO_0 = beatsDO_opdata ? beatsDO_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_3 = 21'h3F << out_1_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_4 = _beatsDO_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_5 = ~_beatsDO_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_1 = _beatsDO_decode_T_5[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_1 = out_1_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_1 = beatsDO_opdata_1 ? beatsDO_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_6 = 21'h3F << out_2_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_7 = _beatsDO_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_8 = ~_beatsDO_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_2 = _beatsDO_decode_T_8[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_2 = out_2_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_2 = beatsDO_opdata_2 ? beatsDO_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_9 = 21'h3F << out_3_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_10 = _beatsDO_decode_T_9[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_11 = ~_beatsDO_decode_T_10; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_3 = _beatsDO_decode_T_11[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_3 = out_3_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_3 = beatsDO_opdata_3 ? beatsDO_decode_3 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_12 = 21'h3F << out_4_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_13 = _beatsDO_decode_T_12[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_14 = ~_beatsDO_decode_T_13; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_4 = _beatsDO_decode_T_14[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_4 = out_4_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_4 = beatsDO_opdata_4 ? 
beatsDO_decode_4 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_15 = 21'h3F << out_5_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_16 = _beatsDO_decode_T_15[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_17 = ~_beatsDO_decode_T_16; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_5 = _beatsDO_decode_T_17[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_5 = out_5_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_5 = beatsDO_opdata_5 ? beatsDO_decode_5 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire [20:0] _beatsDO_decode_T_18 = 21'h3F << out_6_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_19 = _beatsDO_decode_T_18[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_20 = ~_beatsDO_decode_T_19; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_6 = _beatsDO_decode_T_20[5:3]; // @[package.scala:243:46] wire [2:0] beatsDO_6 = beatsDO_decode_6; // @[Edges.scala:220:59, :221:14] wire [20:0] _beatsDO_decode_T_21 = 21'h3F << out_7_d_bits_size; // @[package.scala:243:71] wire [5:0] _beatsDO_decode_T_22 = _beatsDO_decode_T_21[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _beatsDO_decode_T_23 = ~_beatsDO_decode_T_22; // @[package.scala:243:{46,76}] wire [2:0] beatsDO_decode_7 = _beatsDO_decode_T_23[5:3]; // @[package.scala:243:46] wire beatsDO_opdata_7 = out_7_d_bits_opcode[0]; // @[Xbar.scala:216:19] wire [2:0] beatsDO_7 = beatsDO_opdata_7 ? beatsDO_decode_7 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] wire _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:355:40] assign out_0_a_valid = portsAOI_filtered_0_valid; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_opcode = portsAOI_filtered_0_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_param = portsAOI_filtered_0_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_size = portsAOI_filtered_0_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_source = portsAOI_filtered_0_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_address = portsAOI_filtered_0_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_mask = portsAOI_filtered_0_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_data = portsAOI_filtered_0_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_0_a_bits_corrupt = portsAOI_filtered_0_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_1_valid_T_1; // @[Xbar.scala:355:40] assign out_1_a_valid = portsAOI_filtered_1_valid; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_opcode = portsAOI_filtered_1_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_param = portsAOI_filtered_1_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_size = portsAOI_filtered_1_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_source = portsAOI_filtered_1_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_address = portsAOI_filtered_1_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_mask = portsAOI_filtered_1_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_data = portsAOI_filtered_1_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_1_a_bits_corrupt = portsAOI_filtered_1_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_2_valid_T_1; // @[Xbar.scala:355:40] assign out_2_a_valid = portsAOI_filtered_2_valid; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_opcode = 
portsAOI_filtered_2_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_param = portsAOI_filtered_2_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_size = portsAOI_filtered_2_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_source = portsAOI_filtered_2_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_address = portsAOI_filtered_2_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_mask = portsAOI_filtered_2_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_data = portsAOI_filtered_2_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_2_a_bits_corrupt = portsAOI_filtered_2_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_3_valid_T_1; // @[Xbar.scala:355:40] assign out_3_a_valid = portsAOI_filtered_3_valid; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_opcode = portsAOI_filtered_3_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_param = portsAOI_filtered_3_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_size = portsAOI_filtered_3_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_source = portsAOI_filtered_3_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_address = portsAOI_filtered_3_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_mask = portsAOI_filtered_3_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_data = portsAOI_filtered_3_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_3_a_bits_corrupt = portsAOI_filtered_3_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_4_valid_T_1; // @[Xbar.scala:355:40] assign out_4_a_valid = portsAOI_filtered_4_valid; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_opcode = portsAOI_filtered_4_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_param = portsAOI_filtered_4_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_size = portsAOI_filtered_4_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_source = portsAOI_filtered_4_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_address = portsAOI_filtered_4_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_mask = portsAOI_filtered_4_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_data = portsAOI_filtered_4_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_4_a_bits_corrupt = portsAOI_filtered_4_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_5_valid_T_1; // @[Xbar.scala:355:40] assign out_5_a_valid = portsAOI_filtered_5_valid; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_opcode = portsAOI_filtered_5_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_param = portsAOI_filtered_5_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_size = portsAOI_filtered_5_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_source = portsAOI_filtered_5_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_address = portsAOI_filtered_5_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_mask = portsAOI_filtered_5_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_data = portsAOI_filtered_5_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_5_a_bits_corrupt = portsAOI_filtered_5_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_6_valid_T_1; // @[Xbar.scala:355:40] assign out_6_a_valid = portsAOI_filtered_6_valid; // @[Xbar.scala:216:19, :352:24] assign 
out_6_a_bits_opcode = portsAOI_filtered_6_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_param = portsAOI_filtered_6_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_size = portsAOI_filtered_6_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_source = portsAOI_filtered_6_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_address = portsAOI_filtered_6_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_mask = portsAOI_filtered_6_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_data = portsAOI_filtered_6_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_6_a_bits_corrupt = portsAOI_filtered_6_bits_corrupt; // @[Xbar.scala:216:19, :352:24] wire _portsAOI_filtered_7_valid_T_1; // @[Xbar.scala:355:40] assign out_7_a_valid = portsAOI_filtered_7_valid; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_opcode = portsAOI_filtered_7_bits_opcode; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_param = portsAOI_filtered_7_bits_param; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_size = portsAOI_filtered_7_bits_size; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_source = portsAOI_filtered_7_bits_source; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_address = portsAOI_filtered_7_bits_address; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_mask = portsAOI_filtered_7_bits_mask; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_data = portsAOI_filtered_7_bits_data; // @[Xbar.scala:216:19, :352:24] assign out_7_a_bits_corrupt = portsAOI_filtered_7_bits_corrupt; // @[Xbar.scala:216:19, :352:24] assign _portsAOI_filtered_0_valid_T_1 = in_0_a_valid & _portsAOI_filtered_0_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_0_valid = _portsAOI_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_1_valid_T_1 = in_0_a_valid & _portsAOI_filtered_1_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_1_valid = _portsAOI_filtered_1_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_2_valid_T_1 = in_0_a_valid & _portsAOI_filtered_2_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_2_valid = _portsAOI_filtered_2_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_3_valid_T_1 = in_0_a_valid & _portsAOI_filtered_3_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_3_valid = _portsAOI_filtered_3_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_4_valid_T_1 = in_0_a_valid & _portsAOI_filtered_4_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_4_valid = _portsAOI_filtered_4_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_5_valid_T_1 = in_0_a_valid & _portsAOI_filtered_5_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_5_valid = _portsAOI_filtered_5_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_6_valid_T_1 = in_0_a_valid & _portsAOI_filtered_6_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_6_valid = _portsAOI_filtered_6_valid_T_1; // @[Xbar.scala:352:24, :355:40] assign _portsAOI_filtered_7_valid_T_1 = in_0_a_valid & _portsAOI_filtered_7_valid_T; // @[Xbar.scala:159:18, :355:{40,54}] assign portsAOI_filtered_7_valid = _portsAOI_filtered_7_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire _portsAOI_in_0_a_ready_T = requestAIO_0_0 & portsAOI_filtered_0_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_1 = 
requestAIO_0_1 & portsAOI_filtered_1_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_2 = requestAIO_0_2 & portsAOI_filtered_2_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_3 = requestAIO_0_3 & portsAOI_filtered_3_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_4 = requestAIO_0_4 & portsAOI_filtered_4_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_5 = requestAIO_0_5 & portsAOI_filtered_5_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_6 = requestAIO_0_6 & portsAOI_filtered_6_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_7 = requestAIO_0_7 & portsAOI_filtered_7_ready; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_8 = _portsAOI_in_0_a_ready_T | _portsAOI_in_0_a_ready_T_1; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_9 = _portsAOI_in_0_a_ready_T_8 | _portsAOI_in_0_a_ready_T_2; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_10 = _portsAOI_in_0_a_ready_T_9 | _portsAOI_in_0_a_ready_T_3; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_11 = _portsAOI_in_0_a_ready_T_10 | _portsAOI_in_0_a_ready_T_4; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_12 = _portsAOI_in_0_a_ready_T_11 | _portsAOI_in_0_a_ready_T_5; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_13 = _portsAOI_in_0_a_ready_T_12 | _portsAOI_in_0_a_ready_T_6; // @[Mux.scala:30:73] wire _portsAOI_in_0_a_ready_T_14 = _portsAOI_in_0_a_ready_T_13 | _portsAOI_in_0_a_ready_T_7; // @[Mux.scala:30:73] assign _portsAOI_in_0_a_ready_WIRE = _portsAOI_in_0_a_ready_T_14; // @[Mux.scala:30:73] assign in_0_a_ready = _portsAOI_in_0_a_ready_WIRE; // @[Mux.scala:30:73] wire _filtered_0_ready_T; // @[Arbiter.scala:94:31] assign out_0_d_ready = portsDIO_filtered_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_0_valid = _portsDIO_filtered_0_valid_T_1; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_1; // @[Arbiter.scala:94:31] assign out_1_d_ready = portsDIO_filtered_1_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_1_0_valid = _portsDIO_filtered_0_valid_T_3; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_2; // @[Arbiter.scala:94:31] assign out_2_d_ready = portsDIO_filtered_2_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_2_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_2_0_valid = _portsDIO_filtered_0_valid_T_5; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_3; // @[Arbiter.scala:94:31] assign out_3_d_ready = portsDIO_filtered_3_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_3_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_3_0_valid = _portsDIO_filtered_0_valid_T_7; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_4; // @[Arbiter.scala:94:31] assign out_4_d_ready = portsDIO_filtered_4_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_4_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_4_0_valid = _portsDIO_filtered_0_valid_T_9; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_5; // @[Arbiter.scala:94:31] assign out_5_d_ready = portsDIO_filtered_5_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_5_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_5_0_valid = _portsDIO_filtered_0_valid_T_11; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_6; // @[Arbiter.scala:94:31] assign out_6_d_ready = portsDIO_filtered_6_0_ready; // @[Xbar.scala:216:19, :352:24] 
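// D-channel return path: the eight filtered response streams are merged by a locking round-robin
// arbiter (the Arbiter.scala logic below). beatsLeft tracks the beats remaining in the burst that
// is currently being forwarded, so a multi-beat response is never interleaved with responses from
// another output; readys_mask implements the round-robin priority rotation.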
wire portsDIO_filtered_6_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_6_0_valid = _portsDIO_filtered_0_valid_T_13; // @[Xbar.scala:352:24, :355:40] wire _filtered_0_ready_T_7; // @[Arbiter.scala:94:31] assign out_7_d_ready = portsDIO_filtered_7_0_ready; // @[Xbar.scala:216:19, :352:24] wire portsDIO_filtered_7_0_valid; // @[Xbar.scala:352:24] assign portsDIO_filtered_7_0_valid = _portsDIO_filtered_0_valid_T_15; // @[Xbar.scala:352:24, :355:40] reg [8:0] beatsLeft; // @[Arbiter.scala:60:30] wire idle = beatsLeft == 9'h0; // @[Arbiter.scala:60:30, :61:28] wire latch = idle & in_0_d_ready; // @[Xbar.scala:159:18] wire [1:0] readys_lo_lo = {portsDIO_filtered_1_0_valid, portsDIO_filtered_0_valid}; // @[Xbar.scala:352:24] wire [1:0] readys_lo_hi = {portsDIO_filtered_3_0_valid, portsDIO_filtered_2_0_valid}; // @[Xbar.scala:352:24] wire [3:0] readys_lo = {readys_lo_hi, readys_lo_lo}; // @[Arbiter.scala:68:51] wire [1:0] readys_hi_lo = {portsDIO_filtered_5_0_valid, portsDIO_filtered_4_0_valid}; // @[Xbar.scala:352:24] wire [1:0] readys_hi_hi = {portsDIO_filtered_7_0_valid, portsDIO_filtered_6_0_valid}; // @[Xbar.scala:352:24] wire [3:0] readys_hi = {readys_hi_hi, readys_hi_lo}; // @[Arbiter.scala:68:51] wire [7:0] _readys_T = {readys_hi, readys_lo}; // @[Arbiter.scala:68:51] wire [7:0] readys_valid = _readys_T; // @[Arbiter.scala:21:23, :68:51] wire _readys_T_1 = readys_valid == _readys_T; // @[Arbiter.scala:21:23, :22:19, :68:51] wire _readys_T_3 = ~_readys_T_2; // @[Arbiter.scala:22:12] wire _readys_T_4 = ~_readys_T_1; // @[Arbiter.scala:22:{12,19}] reg [7:0] readys_mask; // @[Arbiter.scala:23:23] wire [7:0] _readys_filter_T = ~readys_mask; // @[Arbiter.scala:23:23, :24:30] wire [7:0] _readys_filter_T_1 = readys_valid & _readys_filter_T; // @[Arbiter.scala:21:23, :24:{28,30}] wire [15:0] readys_filter = {_readys_filter_T_1, readys_valid}; // @[Arbiter.scala:21:23, :24:{21,28}] wire [14:0] _readys_unready_T = readys_filter[15:1]; // @[package.scala:262:48] wire [15:0] _readys_unready_T_1 = {readys_filter[15], readys_filter[14:0] | _readys_unready_T}; // @[package.scala:262:{43,48}] wire [13:0] _readys_unready_T_2 = _readys_unready_T_1[15:2]; // @[package.scala:262:{43,48}] wire [15:0] _readys_unready_T_3 = {_readys_unready_T_1[15:14], _readys_unready_T_1[13:0] | _readys_unready_T_2}; // @[package.scala:262:{43,48}] wire [11:0] _readys_unready_T_4 = _readys_unready_T_3[15:4]; // @[package.scala:262:{43,48}] wire [15:0] _readys_unready_T_5 = {_readys_unready_T_3[15:12], _readys_unready_T_3[11:0] | _readys_unready_T_4}; // @[package.scala:262:{43,48}] wire [15:0] _readys_unready_T_6 = _readys_unready_T_5; // @[package.scala:262:43, :263:17] wire [14:0] _readys_unready_T_7 = _readys_unready_T_6[15:1]; // @[package.scala:263:17] wire [15:0] _readys_unready_T_8 = {readys_mask, 8'h0}; // @[Arbiter.scala:23:23, :25:66] wire [15:0] readys_unready = {1'h0, _readys_unready_T_7} | _readys_unready_T_8; // @[Arbiter.scala:25:{52,58,66}] wire [7:0] _readys_readys_T = readys_unready[15:8]; // @[Arbiter.scala:25:58, :26:29] wire [7:0] _readys_readys_T_1 = readys_unready[7:0]; // @[Arbiter.scala:25:58, :26:48] wire [7:0] _readys_readys_T_2 = _readys_readys_T & _readys_readys_T_1; // @[Arbiter.scala:26:{29,39,48}] wire [7:0] readys_readys = ~_readys_readys_T_2; // @[Arbiter.scala:26:{18,39}] wire [7:0] _readys_T_7 = readys_readys; // @[Arbiter.scala:26:18, :30:11] wire _readys_T_5 = |readys_valid; // @[Arbiter.scala:21:23, :27:27] wire _readys_T_6 = latch & _readys_T_5; // @[Arbiter.scala:27:{18,27}, 
:62:24] wire [7:0] _readys_mask_T = readys_readys & readys_valid; // @[Arbiter.scala:21:23, :26:18, :28:29] wire [8:0] _readys_mask_T_1 = {_readys_mask_T, 1'h0}; // @[package.scala:253:48] wire [7:0] _readys_mask_T_2 = _readys_mask_T_1[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _readys_mask_T_3 = _readys_mask_T | _readys_mask_T_2; // @[package.scala:253:{43,53}] wire [9:0] _readys_mask_T_4 = {_readys_mask_T_3, 2'h0}; // @[package.scala:253:{43,48}] wire [7:0] _readys_mask_T_5 = _readys_mask_T_4[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _readys_mask_T_6 = _readys_mask_T_3 | _readys_mask_T_5; // @[package.scala:253:{43,53}] wire [11:0] _readys_mask_T_7 = {_readys_mask_T_6, 4'h0}; // @[package.scala:253:{43,48}] wire [7:0] _readys_mask_T_8 = _readys_mask_T_7[7:0]; // @[package.scala:253:{48,53}] wire [7:0] _readys_mask_T_9 = _readys_mask_T_6 | _readys_mask_T_8; // @[package.scala:253:{43,53}] wire [7:0] _readys_mask_T_10 = _readys_mask_T_9; // @[package.scala:253:43, :254:17] wire _readys_T_8 = _readys_T_7[0]; // @[Arbiter.scala:30:11, :68:76] wire readys_0 = _readys_T_8; // @[Arbiter.scala:68:{27,76}] wire _readys_T_9 = _readys_T_7[1]; // @[Arbiter.scala:30:11, :68:76] wire readys_1 = _readys_T_9; // @[Arbiter.scala:68:{27,76}] wire _readys_T_10 = _readys_T_7[2]; // @[Arbiter.scala:30:11, :68:76] wire readys_2 = _readys_T_10; // @[Arbiter.scala:68:{27,76}] wire _readys_T_11 = _readys_T_7[3]; // @[Arbiter.scala:30:11, :68:76] wire readys_3 = _readys_T_11; // @[Arbiter.scala:68:{27,76}] wire _readys_T_12 = _readys_T_7[4]; // @[Arbiter.scala:30:11, :68:76] wire readys_4 = _readys_T_12; // @[Arbiter.scala:68:{27,76}] wire _readys_T_13 = _readys_T_7[5]; // @[Arbiter.scala:30:11, :68:76] wire readys_5 = _readys_T_13; // @[Arbiter.scala:68:{27,76}] wire _readys_T_14 = _readys_T_7[6]; // @[Arbiter.scala:30:11, :68:76] wire readys_6 = _readys_T_14; // @[Arbiter.scala:68:{27,76}] wire _readys_T_15 = _readys_T_7[7]; // @[Arbiter.scala:30:11, :68:76] wire readys_7 = _readys_T_15; // @[Arbiter.scala:68:{27,76}] wire _winner_T = readys_0 & portsDIO_filtered_0_valid; // @[Xbar.scala:352:24] wire winner_0 = _winner_T; // @[Arbiter.scala:71:{27,69}] wire _winner_T_1 = readys_1 & portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24] wire winner_1 = _winner_T_1; // @[Arbiter.scala:71:{27,69}] wire _winner_T_2 = readys_2 & portsDIO_filtered_2_0_valid; // @[Xbar.scala:352:24] wire winner_2 = _winner_T_2; // @[Arbiter.scala:71:{27,69}] wire _winner_T_3 = readys_3 & portsDIO_filtered_3_0_valid; // @[Xbar.scala:352:24] wire winner_3 = _winner_T_3; // @[Arbiter.scala:71:{27,69}] wire _winner_T_4 = readys_4 & portsDIO_filtered_4_0_valid; // @[Xbar.scala:352:24] wire winner_4 = _winner_T_4; // @[Arbiter.scala:71:{27,69}] wire _winner_T_5 = readys_5 & portsDIO_filtered_5_0_valid; // @[Xbar.scala:352:24] wire winner_5 = _winner_T_5; // @[Arbiter.scala:71:{27,69}] wire _winner_T_6 = readys_6 & portsDIO_filtered_6_0_valid; // @[Xbar.scala:352:24] wire winner_6 = _winner_T_6; // @[Arbiter.scala:71:{27,69}] wire _winner_T_7 = readys_7 & portsDIO_filtered_7_0_valid; // @[Xbar.scala:352:24] wire winner_7 = _winner_T_7; // @[Arbiter.scala:71:{27,69}] wire prefixOR_1 = winner_0; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_2 = prefixOR_1 | winner_1; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_3 = prefixOR_2 | winner_2; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_4 = prefixOR_3 | winner_3; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_5 = prefixOR_4 | winner_4; // @[Arbiter.scala:71:27, :76:48] wire 
prefixOR_6 = prefixOR_5 | winner_5; // @[Arbiter.scala:71:27, :76:48] wire prefixOR_7 = prefixOR_6 | winner_6; // @[Arbiter.scala:71:27, :76:48] wire _prefixOR_T = prefixOR_7 | winner_7; // @[Arbiter.scala:71:27, :76:48] wire _in_0_d_valid_T = portsDIO_filtered_0_valid | portsDIO_filtered_1_0_valid; // @[Xbar.scala:352:24]
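The readys_unready and readys_mask scan chains in the arbiter logic above are instances of the rightOR and leftOR helpers from freechips.rocketchip.util (the // @[package.scala:262...] and // @[package.scala:253...] source annotations). As a reading aid only, here is a small plain-Scala software model of those two helpers; the object and method names are made up for illustration, and the width cap used by the real arbiter is ignored.
object OrScanModel { // hypothetical name, software model only
  // leftOR: OR a value with progressively larger left shifts of itself, so a 1
  // at bit p smears up to every higher bit (used for the readys_mask update)
  def leftOR(x: Int, width: Int): Int = {
    var r = x; var s = 1
    while (s < width) { r |= r << s; s += s }
    r & ((1 << width) - 1)
  }
  // rightOR: the same idea with right shifts, smearing a 1 down to every lower
  // bit (used for the readys_unready chain)
  def rightOR(x: Int, width: Int): Int = {
    var r = x & ((1 << width) - 1); var s = 1
    while (s < width) { r |= r >>> s; s += s }
    r
  }
  def main(args: Array[String]): Unit = {
    println(leftOR(0x10, 8).toBinaryString)  // 00010000 -> "11110000"
    println(rightOR(0x10, 8).toBinaryString) // 00010000 -> "11111"
  }
}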
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
module dataMems_114( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
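The ReservableListBuffer in the prompt above reserves the lowest-numbered free 'data' entry with freeOH := ~(leftOR(~used) << 1) & ~used, which isolates the least-significant cleared bit of the 'used' bitmap; the same trick is reused for freeWriteIdOHRaw in the write-ID allocator. A minimal plain-Scala sketch of that selection (object and method names are hypothetical, for illustration only):
object FreeEntryModel { // hypothetical name, software model only
  // software model of util.leftOR: a 1 at bit p smears up to all higher bits
  def leftOR(x: Int, width: Int): Int = {
    var r = x; var s = 1
    while (s < width) { r |= r << s; s += s }
    r & ((1 << width) - 1)
  }
  // freeOH := ~(leftOR(~used) << 1) & ~used  -- lowest cleared bit of 'used'
  def freeOH(used: Int, width: Int): Int = {
    val free = ~used & ((1 << width) - 1)
    ~(leftOR(free, width) << 1) & free
  }
  def main(args: Array[String]): Unit = {
    // used = 0b1011: entries 0, 1 and 3 are taken, so entry 2 is reserved next
    println(freeOH(0xB, 4).toBinaryString) // prints "100"
  }
}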
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_63( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_87 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
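The SynchronizerPrimitiveShiftReg / AsyncResetSynchronizerShiftReg pair in the prompt above builds its CDC synchronizer as a chain of registers in which the input drives the last stage and the output is read from the first. As a hedged sketch only (hypothetical module name, async-reset wrapping and name suggestion omitted), the same chain can be written with the foldRight pattern that ShiftRegInit in the quoted ShiftReg.scala uses:
import chisel3._
class SimpleSync3 extends Module { // hypothetical, illustrative only
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // three back-to-back registers: io.q follows io.d after three clock edges,
  // giving a metastable first stage time to resolve before io.q is consumed
  io.q := (0 until 3).foldRight(io.d) { case (_, next) => RegNext(next, false.B) }
}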
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Replacement.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
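// Illustrative usage sketch (added note, not part of the original sources): a cache
// with nWays ways would typically obtain one of the policies defined below via the
// factory, e.g.
//   val repl = ReplacementPolicy.fromString("plru", nWays)  // hypothetical call site
//   when (s2_hit) { repl.access(s2_hit_way) }               // touch the way that hit
//   val victimWay = repl.way                                // way to evict on a miss
// (nWays, s2_hit and s2_hit_way are hypothetical names.) The per-policy state layout
// and update rules are defined by the classes below.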
package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import freechips.rocketchip.util.property.cover abstract class ReplacementPolicy { def nBits: Int def perSet: Boolean def way: UInt def miss: Unit def hit: Unit def access(touch_way: UInt): Unit def access(touch_ways: Seq[Valid[UInt]]): Unit def state_read: UInt def get_next_state(state: UInt, touch_way: UInt): UInt def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = { touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev)) } def get_replace_way(state: UInt): UInt } object ReplacementPolicy { def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match { case "random" => new RandomReplacement(n_ways) case "lru" => new TrueLRU(n_ways) case "plru" => new PseudoLRU(n_ways) case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t") } } class RandomReplacement(n_ways: Int) extends ReplacementPolicy { private val replace = Wire(Bool()) replace := false.B def nBits = 16 def perSet = false private val lfsr = LFSR(nBits, replace) def state_read = WireDefault(lfsr) def way = Random(n_ways, lfsr) def miss = replace := true.B def hit = {} def access(touch_way: UInt) = {} def access(touch_ways: Seq[Valid[UInt]]) = {} def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare def get_replace_way(state: UInt) = way } abstract class SeqReplacementPolicy { def access(set: UInt): Unit def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit def way: UInt } abstract class SetAssocReplacementPolicy { def access(set: UInt, touch_way: UInt): Unit def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit def way(set: UInt): UInt } class SeqRandom(n_ways: Int) extends SeqReplacementPolicy { val logic = new RandomReplacement(n_ways) def access(set: UInt) = { } def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = { when (valid && !hit) { logic.miss } } def way = logic.way } class TrueLRU(n_ways: Int) extends ReplacementPolicy { // True LRU replacement policy, using a triangular matrix to track which sets are more recently used than others. // The matrix is packed into a single UInt (or Bits). 
Example 4-way (6-bits): // [5] - 3 more recent than 2 // [4] - 3 more recent than 1 // [3] - 2 more recent than 1 // [2] - 3 more recent than 0 // [1] - 2 more recent than 0 // [0] - 1 more recent than 0 def nBits = (n_ways * (n_ways-1)) / 2 def perSet = true private val state_reg = RegInit(0.U(nBits.W)) def state_read = WireDefault(state_reg) private def extractMRUVec(state: UInt): Seq[UInt] = { // Extract per-way information about which higher-indexed ways are more recently used val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W))) var lsb = 0 for (i <- 0 until n_ways-1) { moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W)) lsb = lsb + (n_ways - i - 1) } moreRecentVec } def get_next_state(state: UInt, touch_way: UInt): UInt = { val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W))) val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix val wayDec = UIntToOH(touch_way, n_ways) // Compute next value of triangular matrix // set the touched way as more recent than every other way nextState.zipWithIndex.map { case (e, i) => e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec) } nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1 } def access(touch_way: UInt): Unit = { state_reg := get_next_state(state_reg, touch_way) } def access(touch_ways: Seq[Valid[UInt]]): Unit = { when (touch_ways.map(_.valid).orR) { state_reg := get_next_state(state_reg, touch_ways) } for (i <- 1 until touch_ways.size) { cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous") } } def get_replace_way(state: UInt): UInt = { val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix // For each way, determine if all other ways are more recent val mruWayDec = (0 until n_ways).map { i => val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR) val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _)) upperMoreRecent && lowerMoreRecent } OHToUInt(mruWayDec) } def way = get_replace_way(state_reg) def miss = access(way) def hit = {} @deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05") def replace: UInt = way } class PseudoLRU(n_ways: Int) extends ReplacementPolicy { // Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU // // // - bits storage example for 4-way PLRU binary tree: // bit[2]: ways 3+2 older than ways 1+0 // / \ // bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0 // // // - bits storage example for 3-way PLRU binary tree: // bit[1]: way 2 older than ways 1+0 // \ // bit[0]: way 1 older than way 0 // // // - bits storage example for 8-way PLRU binary tree: // bit[6]: ways 7-4 older than ways 3-0 // / \ // bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0 // / \ / \ // bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0 def nBits = n_ways - 1 def perSet = true private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W)) def state_read = WireDefault(state_reg) def access(touch_way: UInt): Unit = { state_reg := get_next_state(state_reg, touch_way) } def access(touch_ways: Seq[Valid[UInt]]): Unit = { when (touch_ways.map(_.valid).orR) { state_reg := get_next_state(state_reg, touch_ways) } for (i <- 1 until touch_ways.size) { cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i 
simultaneous") } } /** @param state state_reg bits for this sub-tree * @param touch_way touched way encoded value bits for this sub-tree * @param tree_nways number of ways in this sub-tree */ def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = { require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways") require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways") if (tree_nways > 2) { // we are at a branching node in the tree, so recurse val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree val set_left_older = !touch_way(log2Ceil(tree_nways)-1) val left_subtree_state = state.extract(tree_nways-3, right_nways-1) val right_subtree_state = state(right_nways-2, 0) if (left_nways > 1) { // we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees Cat(set_left_older, Mux(set_left_older, left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer Mux(set_left_older, get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree } else { // we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree Cat(set_left_older, Mux(set_left_older, get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree } } else if (tree_nways == 2) { // we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value !touch_way(0) } else { // tree_nways <= 1 // we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires) 0.U(1.W) } } def get_next_state(state: UInt, touch_way: UInt): UInt = { val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo (log2Ceil(n_ways)) else touch_way.extract(log2Ceil(n_ways)-1,0) get_next_state(state, touch_way_sized, n_ways) } /** @param state state_reg bits for this sub-tree * @param tree_nways number of ways in this sub-tree */ def get_replace_way(state: UInt, tree_nways: Int): UInt = { require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways") // this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb if (tree_nways > 2) { // we are at a branching node in the tree, so recurse val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree val left_subtree_older = state(tree_nways-2) val left_subtree_state = state.extract(tree_nways-3, right_nways-1) val right_subtree_state = state(right_nways-2, 0) if (left_nways > 1) { // we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value 
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right get_replace_way(left_subtree_state, left_nways), // recurse left get_replace_way(right_subtree_state, right_nways))) // recurse right } else { // we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right 0.U(1.W), get_replace_way(right_subtree_state, right_nways))) // recurse right } } else if (tree_nways == 2) { // we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value state(0) } else { // tree_nways <= 1 // we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value 0.U(1.W) } } def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways) def way = get_replace_way(state_reg) def miss = access(way) def hit = {} } class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy { val logic = new PseudoLRU(n_ways) val state = SyncReadMem(n_sets, UInt(logic.nBits.W)) val current_state = Wire(UInt(logic.nBits.W)) val next_state = Wire(UInt(logic.nBits.W)) val plru_way = logic.get_replace_way(current_state) def access(set: UInt) = { current_state := state.read(set) } def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = { val update_way = Mux(hit, way, plru_way) next_state := logic.get_next_state(current_state, update_way) when (valid) { state.write(set, next_state) } } def way = plru_way } class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy { val logic = policy.toLowerCase match { case "plru" => new PseudoLRU(n_ways) case "lru" => new TrueLRU(n_ways) case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t") } val state_vec = if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W)))) def access(set: UInt, touch_way: UInt) = { state_vec(set) := logic.get_next_state(state_vec(set), touch_way) } def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = { require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways") for (set <- 0 until n_sets) { val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) => Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)} when (set_touch_ways.map(_.valid).orR) { state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways) } } } def way(set: UInt) = logic.get_replace_way(state_vec(set)) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) { val plru = new PseudoLRU(n_ways) // step io.finished := RegNext(true.B, false.B) val get_replace_ways = (0 until (1 << (n_ways-1))).map(state => plru.get_replace_way(state = state.U((n_ways-1).W))) val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way => plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W)))) n_ways match { case 2 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) 
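// (added note) For n_ways=2 there is a single state bit: get_replace_way returns it
// directly and get_next_state stores the complement of the touched way; the
// remaining assertions in this case check that behaviour exhaustively.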
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1)) assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1)) } case 3 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2)) assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3)) assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1)) assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2)) assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1)) assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2)) assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0)) assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1)) assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2)) assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0)) assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1)) assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2)) } case 4 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2)) assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3)) assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4)) assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5)) assert(get_replace_ways(6) === 
3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6)) assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7)) assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1)) assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2)) assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3)) assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1)) assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2)) assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3)) assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0)) assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1)) assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2)) assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3)) assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0)) assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1)) assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2)) assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3)) assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0)) assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1)) assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2)) assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3)) assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0)) assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1)) assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2)) assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3)) assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0)) assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 
way=1: expected=6 actual=%d", get_next_states(6)(1)) assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2)) assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3)) assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0)) assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=5: expected=6 actual=%d", get_next_states(7)(1)) assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2)) assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3)) } case 5 => { assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0)) assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1)) assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2)) assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3)) assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4)) assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5)) assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6)) assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7)) assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8)) assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9)) assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10)) assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11)) assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12)) assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13)) assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14)) assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15)) assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0)) assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1)) assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2)) assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3)) assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: 
expected=00 actual=%d", get_next_states( 0)(4)) assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0)) assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1)) assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2)) assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3)) assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4)) assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0)) assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1)) assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2)) assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3)) assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4)) assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0)) assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1)) assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2)) assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3)) assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4)) assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0)) assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1)) assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2)) assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3)) assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4)) assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0)) assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1)) assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2)) assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3)) assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4)) assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0)) 
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1)) assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2)) assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3)) assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4)) assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0)) assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=5: expected=14 actual=%d", get_next_states( 7)(1)) assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2)) assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3)) assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4)) assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0)) assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1)) assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2)) assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3)) assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4)) assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0)) assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1)) assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2)) assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3)) assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4)) assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0)) assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1)) assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2)) assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3)) assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4)) assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0)) assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1)) assert(get_next_states(11)(2) === 11.U(plru.nBits.W), 
s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2)) assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3)) assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4)) assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0)) assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1)) assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2)) assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3)) assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4)) assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0)) assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1)) assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2)) assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3)) assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4)) assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0)) assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1)) assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2)) assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3)) assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4)) assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0)) assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=5: expected=14 actual=%d", get_next_states(15)(1)) assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2)) assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3)) assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4)) } case 6 => { assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0)) assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1)) assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2)) assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", 
get_replace_ways( 3)) assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4)) assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5)) assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6)) assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7)) assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8)) assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9)) assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10)) assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11)) assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12)) assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13)) assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14)) assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15)) assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16)) assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17)) assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18)) assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19)) assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20)) assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21)) assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22)) assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23)) assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24)) assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25)) assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26)) assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27)) assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28)) assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29)) assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30)) 
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31)) } case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways") } } File Consts.scala: // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket.constants import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ trait ScalarOpConstants { val SZ_BR = 3 def BR_X = BitPat("b???") def BR_EQ = 0.U(3.W) def BR_NE = 1.U(3.W) def BR_J = 2.U(3.W) def BR_N = 3.U(3.W) def BR_LT = 4.U(3.W) def BR_GE = 5.U(3.W) def BR_LTU = 6.U(3.W) def BR_GEU = 7.U(3.W) def A1_X = BitPat("b??") def A1_ZERO = 0.U(2.W) def A1_RS1 = 1.U(2.W) def A1_PC = 2.U(2.W) def A1_RS1SHL = 3.U(2.W) def IMM_X = BitPat("b???") def IMM_S = 0.U(3.W) def IMM_SB = 1.U(3.W) def IMM_U = 2.U(3.W) def IMM_UJ = 3.U(3.W) def IMM_I = 4.U(3.W) def IMM_Z = 5.U(3.W) def A2_X = BitPat("b???") def A2_ZERO = 0.U(3.W) def A2_SIZE = 1.U(3.W) def A2_RS2 = 2.U(3.W) def A2_IMM = 3.U(3.W) def A2_RS2OH = 4.U(3.W) def A2_IMMOH = 5.U(3.W) def X = BitPat("b?") def N = BitPat("b0") def Y = BitPat("b1") val SZ_DW = 1 def DW_X = X def DW_32 = false.B def DW_64 = true.B def DW_XPR = DW_64 } trait MemoryOpConstants { val NUM_XA_OPS = 9 val M_SZ = 5 def M_X = BitPat("b?????"); def M_XRD = "b00000".U; // int load def M_XWR = "b00001".U; // int store def M_PFR = "b00010".U; // prefetch with intent to read def M_PFW = "b00011".U; // prefetch with intent to write def M_XA_SWAP = "b00100".U def M_FLUSH_ALL = "b00101".U // flush all lines def M_XLR = "b00110".U def M_XSC = "b00111".U def M_XA_ADD = "b01000".U def M_XA_XOR = "b01001".U def M_XA_OR = "b01010".U def M_XA_AND = "b01011".U def M_XA_MIN = "b01100".U def M_XA_MAX = "b01101".U def M_XA_MINU = "b01110".U def M_XA_MAXU = "b01111".U def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions def M_PWR = "b10001".U // partial (masked) store def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions def M_SFENCE = "b10100".U // SFENCE.VMA def M_HFENCEV = "b10101".U // HFENCE.VVMA def M_HFENCEG = "b10110".U // HFENCE.GVMA def M_WOK = "b10111".U // check write permissions but don't perform a write def M_HLVX = "b10000".U // HLVX instruction def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND) def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU) def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd) def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd) def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd) def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR } File TLB.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
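// Illustrative note (added, not part of the original sources): with a hypothetical
// TLBConfig(nSets = 32, nWays = 32, nSectors = 4), the sectored_entries array in the
// TLB below holds 32 sets of 32/4 = 8 entries, each entry covering 4 neighbouring
// virtual pages; the set index memIdx is then vpn(6,2) and the sector within an
// entry is selected by vpn(1,0).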
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.devices.debug.DebugModuleKey import freechips.rocketchip.diplomacy.RegionType import freechips.rocketchip.subsystem.CacheBlockBytes import freechips.rocketchip.tile.{CoreModule, CoreBundle} import freechips.rocketchip.tilelink._ import freechips.rocketchip.util.{OptimizationBarrier, SetAssocLRU, PseudoLRU, PopCountAtLeast, property} import freechips.rocketchip.util.BooleanToAugmentedBoolean import freechips.rocketchip.util.IntToAugmentedInt import freechips.rocketchip.util.UIntToAugmentedUInt import freechips.rocketchip.util.UIntIsOneOf import freechips.rocketchip.util.SeqToAugmentedSeq import freechips.rocketchip.util.SeqBoolBitwiseOps case object ASIdBits extends Field[Int](0) case object VMIdBits extends Field[Int](0) /** =SFENCE= * rs1 rs2 * {{{ * 0 0 -> flush All * 0 1 -> flush by ASID * 1 1 -> flush by ADDR * 1 0 -> flush by ADDR and ASID * }}} * {{{ * If rs1=x0 and rs2=x0, the fence orders all reads and writes made to any level of the page tables, for all address spaces. * If rs1=x0 and rs2!=x0, the fence orders all reads and writes made to any level of the page tables, but only for the address space identified by integer register rs2. Accesses to global mappings (see Section 4.3.1) are not ordered. * If rs1!=x0 and rs2=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for all address spaces. * If rs1!=x0 and rs2!=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for the address space identified by integer register rs2. Accesses to global mappings are not ordered. * }}} */ class SFenceReq(implicit p: Parameters) extends CoreBundle()(p) { val rs1 = Bool() val rs2 = Bool() val addr = UInt(vaddrBits.W) val asid = UInt((asIdBits max 1).W) // TODO zero-width val hv = Bool() val hg = Bool() } class TLBReq(lgMaxSize: Int)(implicit p: Parameters) extends CoreBundle()(p) { /** request address from CPU. */ val vaddr = UInt(vaddrBitsExtended.W) /** don't lookup TLB, bypass vaddr as paddr */ val passthrough = Bool() /** granularity */ val size = UInt(log2Ceil(lgMaxSize + 1).W) /** memory command. */ val cmd = Bits(M_SZ.W) val prv = UInt(PRV.SZ.W) /** virtualization mode */ val v = Bool() } class TLBExceptions extends Bundle { val ld = Bool() val st = Bool() val inst = Bool() } class TLBResp(lgMaxSize: Int = 3)(implicit p: Parameters) extends CoreBundle()(p) { // lookup responses val miss = Bool() /** physical address */ val paddr = UInt(paddrBits.W) val gpa = UInt(vaddrBitsExtended.W) val gpa_is_pte = Bool() /** page fault exception */ val pf = new TLBExceptions /** guest page fault exception */ val gf = new TLBExceptions /** access exception */ val ae = new TLBExceptions /** misaligned access exception */ val ma = new TLBExceptions /** if this address is cacheable */ val cacheable = Bool() /** if caches must allocate this address */ val must_alloc = Bool() /** if this address is prefetchable for caches*/ val prefetchable = Bool() /** size/cmd of request that generated this response*/ val size = UInt(log2Ceil(lgMaxSize + 1).W) val cmd = UInt(M_SZ.W) } class TLBEntryData(implicit p: Parameters) extends CoreBundle()(p) { val ppn = UInt(ppnBits.W) /** pte.u user */ val u = Bool() /** pte.g global */ val g = Bool() /** access exception. * D$ -> PTW -> TLB AE * Alignment failed. 
*/ val ae_ptw = Bool() val ae_final = Bool() val ae_stage2 = Bool() /** page fault */ val pf = Bool() /** guest page fault */ val gf = Bool() /** supervisor write */ val sw = Bool() /** supervisor execute */ val sx = Bool() /** supervisor read */ val sr = Bool() /** hypervisor write */ val hw = Bool() /** hypervisor excute */ val hx = Bool() /** hypervisor read */ val hr = Bool() /** prot_w */ val pw = Bool() /** prot_x */ val px = Bool() /** prot_r */ val pr = Bool() /** PutPartial */ val ppp = Bool() /** AMO logical */ val pal = Bool() /** AMO arithmetic */ val paa = Bool() /** get/put effects */ val eff = Bool() /** cacheable */ val c = Bool() /** fragmented_superpage support */ val fragmented_superpage = Bool() } /** basic cell for TLB data */ class TLBEntry(val nSectors: Int, val superpage: Boolean, val superpageOnly: Boolean)(implicit p: Parameters) extends CoreBundle()(p) { require(nSectors == 1 || !superpage) require(!superpageOnly || superpage) val level = UInt(log2Ceil(pgLevels).W) /** use vpn as tag */ val tag_vpn = UInt(vpnBits.W) /** tag in vitualization mode */ val tag_v = Bool() /** entry data */ val data = Vec(nSectors, UInt(new TLBEntryData().getWidth.W)) /** valid bit */ val valid = Vec(nSectors, Bool()) /** returns all entry data in this entry */ def entry_data = data.map(_.asTypeOf(new TLBEntryData)) /** returns the index of sector */ private def sectorIdx(vpn: UInt) = vpn.extract(nSectors.log2-1, 0) /** returns the entry data matched with this vpn*/ def getData(vpn: UInt) = OptimizationBarrier(data(sectorIdx(vpn)).asTypeOf(new TLBEntryData)) /** returns whether a sector hits */ def sectorHit(vpn: UInt, virtual: Bool) = valid.orR && sectorTagMatch(vpn, virtual) /** returns whether tag matches vpn */ def sectorTagMatch(vpn: UInt, virtual: Bool) = (((tag_vpn ^ vpn) >> nSectors.log2) === 0.U) && (tag_v === virtual) /** returns hit signal */ def hit(vpn: UInt, virtual: Bool): Bool = { if (superpage && usingVM) { var tagMatch = valid.head && (tag_v === virtual) for (j <- 0 until pgLevels) { val base = (pgLevels - 1 - j) * pgLevelBits val n = pgLevelBits + (if (j == 0) hypervisorExtraAddrBits else 0) val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B tagMatch = tagMatch && (ignore || (tag_vpn ^ vpn)(base + n - 1, base) === 0.U) } tagMatch } else { val idx = sectorIdx(vpn) valid(idx) && sectorTagMatch(vpn, virtual) } } /** returns the ppn of the input TLBEntryData */ def ppn(vpn: UInt, data: TLBEntryData) = { val supervisorVPNBits = pgLevels * pgLevelBits if (superpage && usingVM) { var res = data.ppn >> pgLevelBits*(pgLevels - 1) for (j <- 1 until pgLevels) { val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B res = Cat(res, (Mux(ignore, vpn, 0.U) | data.ppn)(supervisorVPNBits - j*pgLevelBits - 1, supervisorVPNBits - (j + 1)*pgLevelBits)) } res } else { data.ppn } } /** does the refill * * find the target entry with vpn tag * and replace the target entry with the input entry data */ def insert(vpn: UInt, virtual: Bool, level: UInt, entry: TLBEntryData): Unit = { this.tag_vpn := vpn this.tag_v := virtual this.level := level.extract(log2Ceil(pgLevels - superpageOnly.toInt)-1, 0) val idx = sectorIdx(vpn) valid(idx) := true.B data(idx) := entry.asUInt } def invalidate(): Unit = { valid.foreach(_ := false.B) } def invalidate(virtual: Bool): Unit = { for ((v, e) <- valid zip entry_data) when (tag_v === virtual) { v := false.B } } def invalidateVPN(vpn: UInt, virtual: Bool): Unit = { if (superpage) { when (hit(vpn, virtual)) { invalidate() } } else { 
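// (clarifying note) Non-superpage path: only the single sector selected by
// sectorIdx(vpn) is invalidated here, whereas the superpage branch above drops the
// whole entry, since one tag covers the entire mapping.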
when (sectorTagMatch(vpn, virtual)) { for (((v, e), i) <- (valid zip entry_data).zipWithIndex) when (tag_v === virtual && i.U === sectorIdx(vpn)) { v := false.B } } } // For fragmented superpage mappings, we assume the worst (largest) // case, and zap entries whose most-significant VPNs match when (((tag_vpn ^ vpn) >> (pgLevelBits * (pgLevels - 1))) === 0.U) { for ((v, e) <- valid zip entry_data) when (tag_v === virtual && e.fragmented_superpage) { v := false.B } } } def invalidateNonGlobal(virtual: Bool): Unit = { for ((v, e) <- valid zip entry_data) when (tag_v === virtual && !e.g) { v := false.B } } } /** TLB config * * @param nSets the number of sets of PTE, follow [[ICacheParams.nSets]] * @param nWays the total number of wayss of PTE, follow [[ICacheParams.nWays]] * @param nSectors the number of ways in a single PTE TLBEntry * @param nSuperpageEntries the number of SuperpageEntries */ case class TLBConfig( nSets: Int, nWays: Int, nSectors: Int = 4, nSuperpageEntries: Int = 4) /** =Overview= * [[TLB]] is a TLB template which contains PMA logic and PMP checker. * * TLB caches PTE and accelerates the address translation process. * When tlb miss happens, ask PTW(L2TLB) for Page Table Walk. * Perform PMP and PMA check during the translation and throw exception if there were any. * * ==Cache Structure== * - Sectored Entry (PTE) * - set-associative or direct-mapped * - nsets = [[TLBConfig.nSets]] * - nways = [[TLBConfig.nWays]] / [[TLBConfig.nSectors]] * - PTEEntry( sectors = [[TLBConfig.nSectors]] ) * - LRU(if set-associative) * * - Superpage Entry(superpage PTE) * - fully associative * - nsets = [[TLBConfig.nSuperpageEntries]] * - PTEEntry(sectors = 1) * - PseudoLRU * * - Special Entry(PTE across PMP) * - nsets = 1 * - PTEEntry(sectors = 1) * * ==Address structure== * {{{ * |vaddr | * |ppn/vpn | pgIndex | * | | | * | |nSets |nSector | |}}} * * ==State Machine== * {{{ * s_ready: ready to accept request from CPU. * s_request: when L1TLB(this) miss, send request to PTW(L2TLB), . * s_wait: wait for PTW to refill L1TLB. * s_wait_invalidate: L1TLB is waiting for respond from PTW, but L1TLB will invalidate respond from PTW.}}} * * ==PMP== * pmp check * - special_entry: always check * - other entry: check on refill * * ==Note== * PMA consume diplomacy parameter generate physical memory address checking logic * * Boom use Rocket ITLB, and its own DTLB. * * Accelerators:{{{ * sha3: DTLB * gemmini: DTLB * hwacha: DTLB*2+ITLB}}} * @param instruction true for ITLB, false for DTLB * @param lgMaxSize @todo seems granularity * @param cfg [[TLBConfig]] * @param edge collect SoC metadata. 
*/ class TLB(instruction: Boolean, lgMaxSize: Int, cfg: TLBConfig)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) { override def desiredName = if (instruction) "ITLB" else "DTLB" val io = IO(new Bundle { /** request from Core */ val req = Flipped(Decoupled(new TLBReq(lgMaxSize))) /** response to Core */ val resp = Output(new TLBResp(lgMaxSize)) /** SFence Input */ val sfence = Flipped(Valid(new SFenceReq)) /** IO to PTW */ val ptw = new TLBPTWIO /** suppress a TLB refill, one cycle after a miss */ val kill = Input(Bool()) }) io.ptw.customCSRs := DontCare val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits) val vpn = io.req.bits.vaddr(vaddrBits-1, pgIdxBits) /** index for sectored_Entry */ val memIdx = vpn.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2) /** TLB Entry */ val sectored_entries = Reg(Vec(cfg.nSets, Vec(cfg.nWays / cfg.nSectors, new TLBEntry(cfg.nSectors, false, false)))) /** Superpage Entry */ val superpage_entries = Reg(Vec(cfg.nSuperpageEntries, new TLBEntry(1, true, true))) /** Special Entry * * If PMP granularity is less than page size, thus need additional "special" entry manage PMP. */ val special_entry = (!pageGranularityPMPs).option(Reg(new TLBEntry(1, true, false))) def ordinary_entries = sectored_entries(memIdx) ++ superpage_entries def all_entries = ordinary_entries ++ special_entry def all_real_entries = sectored_entries.flatten ++ superpage_entries ++ special_entry val s_ready :: s_request :: s_wait :: s_wait_invalidate :: Nil = Enum(4) val state = RegInit(s_ready) // use vpn as refill_tag val r_refill_tag = Reg(UInt(vpnBits.W)) val r_superpage_repl_addr = Reg(UInt(log2Ceil(superpage_entries.size).W)) val r_sectored_repl_addr = Reg(UInt(log2Ceil(sectored_entries.head.size).W)) val r_sectored_hit = Reg(Valid(UInt(log2Ceil(sectored_entries.head.size).W))) val r_superpage_hit = Reg(Valid(UInt(log2Ceil(superpage_entries.size).W))) val r_vstage1_en = Reg(Bool()) val r_stage2_en = Reg(Bool()) val r_need_gpa = Reg(Bool()) val r_gpa_valid = Reg(Bool()) val r_gpa = Reg(UInt(vaddrBits.W)) val r_gpa_vpn = Reg(UInt(vpnBits.W)) val r_gpa_is_pte = Reg(Bool()) /** privilege mode */ val priv = io.req.bits.prv val priv_v = usingHypervisor.B && io.req.bits.v val priv_s = priv(0) // user mode and supervisor mode val priv_uses_vm = priv <= PRV.S.U val satp = Mux(priv_v, io.ptw.vsatp, io.ptw.ptbr) val stage1_en = usingVM.B && satp.mode(satp.mode.getWidth-1) /** VS-stage translation enable */ val vstage1_en = usingHypervisor.B && priv_v && io.ptw.vsatp.mode(io.ptw.vsatp.mode.getWidth-1) /** G-stage translation enable */ val stage2_en = usingHypervisor.B && priv_v && io.ptw.hgatp.mode(io.ptw.hgatp.mode.getWidth-1) /** Enable Virtual Memory when: * 1. statically configured * 1. satp highest bits enabled * i. RV32: * - 0 -> Bare * - 1 -> SV32 * i. RV64: * - 0000 -> Bare * - 1000 -> SV39 * - 1001 -> SV48 * - 1010 -> SV57 * - 1011 -> SV64 * 1. In virtualization mode, vsatp highest bits enabled * 1. priv mode in U and S. * 1. in H & M mode, disable VM. * 1. no passthrough(micro-arch defined.) 
* * @see RV-priv spec 4.1.11 Supervisor Address Translation and Protection (satp) Register * @see RV-priv spec 8.2.18 Virtual Supervisor Address Translation and Protection Register (vsatp) */ val vm_enabled = (stage1_en || stage2_en) && priv_uses_vm && !io.req.bits.passthrough // flush guest entries on vsatp.MODE Bare <-> SvXX transitions val v_entries_use_stage1 = RegInit(false.B) val vsatp_mode_mismatch = priv_v && (vstage1_en =/= v_entries_use_stage1) && !io.req.bits.passthrough // share a single physical memory attribute checker (unshare if critical path) val refill_ppn = io.ptw.resp.bits.pte.ppn(ppnBits-1, 0) /** refill signal */ val do_refill = usingVM.B && io.ptw.resp.valid /** sfence invalidate refill */ val invalidate_refill = state.isOneOf(s_request /* don't care */, s_wait_invalidate) || io.sfence.valid // PMP val mpu_ppn = Mux(do_refill, refill_ppn, Mux(vm_enabled && special_entry.nonEmpty.B, special_entry.map(e => e.ppn(vpn, e.getData(vpn))).getOrElse(0.U), io.req.bits.vaddr >> pgIdxBits)) val mpu_physaddr = Cat(mpu_ppn, io.req.bits.vaddr(pgIdxBits-1, 0)) val mpu_priv = Mux[UInt](usingVM.B && (do_refill || io.req.bits.passthrough /* PTW */), PRV.S.U, Cat(io.ptw.status.debug, priv)) val pmp = Module(new PMPChecker(lgMaxSize)) pmp.io.addr := mpu_physaddr pmp.io.size := io.req.bits.size pmp.io.pmp := (io.ptw.pmp: Seq[PMP]) pmp.io.prv := mpu_priv val pma = Module(new PMAChecker(edge.manager)(p)) pma.io.paddr := mpu_physaddr // todo: using DataScratchpad doesn't support cacheable. val cacheable = pma.io.resp.cacheable && (instruction || !usingDataScratchpad).B val homogeneous = TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), BigInt(1) << pgIdxBits, 1 << lgMaxSize)(mpu_physaddr).homogeneous // In M mode, if access DM address(debug module program buffer) val deny_access_to_debug = mpu_priv <= PRV.M.U && p(DebugModuleKey).map(dmp => dmp.address.contains(mpu_physaddr)).getOrElse(false.B) val prot_r = pma.io.resp.r && !deny_access_to_debug && pmp.io.r val prot_w = pma.io.resp.w && !deny_access_to_debug && pmp.io.w val prot_pp = pma.io.resp.pp val prot_al = pma.io.resp.al val prot_aa = pma.io.resp.aa val prot_x = pma.io.resp.x && !deny_access_to_debug && pmp.io.x val prot_eff = pma.io.resp.eff // hit check val sector_hits = sectored_entries(memIdx).map(_.sectorHit(vpn, priv_v)) val superpage_hits = superpage_entries.map(_.hit(vpn, priv_v)) val hitsVec = all_entries.map(vm_enabled && _.hit(vpn, priv_v)) val real_hits = hitsVec.asUInt val hits = Cat(!vm_enabled, real_hits) // use ptw response to refill // permission bit arrays when (do_refill) { val pte = io.ptw.resp.bits.pte val refill_v = r_vstage1_en || r_stage2_en val newEntry = Wire(new TLBEntryData) newEntry.ppn := pte.ppn newEntry.c := cacheable newEntry.u := pte.u newEntry.g := pte.g && pte.v newEntry.ae_ptw := io.ptw.resp.bits.ae_ptw newEntry.ae_final := io.ptw.resp.bits.ae_final newEntry.ae_stage2 := io.ptw.resp.bits.ae_final && io.ptw.resp.bits.gpa_is_pte && r_stage2_en newEntry.pf := io.ptw.resp.bits.pf newEntry.gf := io.ptw.resp.bits.gf newEntry.hr := io.ptw.resp.bits.hr newEntry.hw := io.ptw.resp.bits.hw newEntry.hx := io.ptw.resp.bits.hx newEntry.sr := pte.sr() newEntry.sw := pte.sw() newEntry.sx := pte.sx() newEntry.pr := prot_r newEntry.pw := prot_w newEntry.px := prot_x newEntry.ppp := prot_pp newEntry.pal := prot_al newEntry.paa := prot_aa newEntry.eff := prot_eff newEntry.fragmented_superpage := io.ptw.resp.bits.fragmented_superpage // refill special_entry when (special_entry.nonEmpty.B && 
!io.ptw.resp.bits.homogeneous) { special_entry.foreach(_.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry)) }.elsewhen (io.ptw.resp.bits.level < (pgLevels-1).U) { val waddr = Mux(r_superpage_hit.valid && usingHypervisor.B, r_superpage_hit.bits, r_superpage_repl_addr) for ((e, i) <- superpage_entries.zipWithIndex) when (r_superpage_repl_addr === i.U) { e.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry) when (invalidate_refill) { e.invalidate() } } // refill sectored_hit }.otherwise { val r_memIdx = r_refill_tag.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2) val waddr = Mux(r_sectored_hit.valid, r_sectored_hit.bits, r_sectored_repl_addr) for ((e, i) <- sectored_entries(r_memIdx).zipWithIndex) when (waddr === i.U) { when (!r_sectored_hit.valid) { e.invalidate() } e.insert(r_refill_tag, refill_v, 0.U, newEntry) when (invalidate_refill) { e.invalidate() } } } r_gpa_valid := io.ptw.resp.bits.gpa.valid r_gpa := io.ptw.resp.bits.gpa.bits r_gpa_is_pte := io.ptw.resp.bits.gpa_is_pte } // get all entries data. val entries = all_entries.map(_.getData(vpn)) val normal_entries = entries.take(ordinary_entries.size) // parallel query PPN from [[all_entries]], if VM not enabled return VPN instead val ppn = Mux1H(hitsVec :+ !vm_enabled, (all_entries zip entries).map{ case (entry, data) => entry.ppn(vpn, data) } :+ vpn(ppnBits-1, 0)) val nPhysicalEntries = 1 + special_entry.size // generally PTW misaligned load exception. val ptw_ae_array = Cat(false.B, entries.map(_.ae_ptw).asUInt) val final_ae_array = Cat(false.B, entries.map(_.ae_final).asUInt) val ptw_pf_array = Cat(false.B, entries.map(_.pf).asUInt) val ptw_gf_array = Cat(false.B, entries.map(_.gf).asUInt) val sum = Mux(priv_v, io.ptw.gstatus.sum, io.ptw.status.sum) // if in hypervisor/machine mode, cannot read/write user entries. // if in superviosr/user mode, "If the SUM bit in the sstatus register is set, supervisor mode software may also access pages with U=1.(from spec)" val priv_rw_ok = Mux(!priv_s || sum, entries.map(_.u).asUInt, 0.U) | Mux(priv_s, ~entries.map(_.u).asUInt, 0.U) // if in hypervisor/machine mode, other than user pages, all pages are executable. // if in superviosr/user mode, only user page can execute. val priv_x_ok = Mux(priv_s, ~entries.map(_.u).asUInt, entries.map(_.u).asUInt) val stage1_bypass = Fill(entries.size, usingHypervisor.B) & (Fill(entries.size, !stage1_en) | entries.map(_.ae_stage2).asUInt) val mxr = io.ptw.status.mxr | Mux(priv_v, io.ptw.gstatus.mxr, false.B) // "The vsstatus field MXR, which makes execute-only pages readable, only overrides VS-stage page protection.(from spec)" val r_array = Cat(true.B, (priv_rw_ok & (entries.map(_.sr).asUInt | Mux(mxr, entries.map(_.sx).asUInt, 0.U))) | stage1_bypass) val w_array = Cat(true.B, (priv_rw_ok & entries.map(_.sw).asUInt) | stage1_bypass) val x_array = Cat(true.B, (priv_x_ok & entries.map(_.sx).asUInt) | stage1_bypass) val stage2_bypass = Fill(entries.size, !stage2_en) val hr_array = Cat(true.B, entries.map(_.hr).asUInt | Mux(io.ptw.status.mxr, entries.map(_.hx).asUInt, 0.U) | stage2_bypass) val hw_array = Cat(true.B, entries.map(_.hw).asUInt | stage2_bypass) val hx_array = Cat(true.B, entries.map(_.hx).asUInt | stage2_bypass) // These array is for each TLB entries. 
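// Bit i of these arrays corresponds to all_entries(i) (the selected sectored set, then the
// superpage entries, then the optional special entry); the most-significant bit(s) added by
// Cat(...) or Fill(nPhysicalEntries, ...) handle the physically-checked cases (special entry
// and/or the !vm_enabled passthrough), so each array is the same width as `hits` and is
// combined with it bit-for-bit below.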
// user mode can read: PMA OK, TLB OK, AE OK val pr_array = Cat(Fill(nPhysicalEntries, prot_r), normal_entries.map(_.pr).asUInt) & ~(ptw_ae_array | final_ae_array) // user mode can write: PMA OK, TLB OK, AE OK val pw_array = Cat(Fill(nPhysicalEntries, prot_w), normal_entries.map(_.pw).asUInt) & ~(ptw_ae_array | final_ae_array) // user mode can write: PMA OK, TLB OK, AE OK val px_array = Cat(Fill(nPhysicalEntries, prot_x), normal_entries.map(_.px).asUInt) & ~(ptw_ae_array | final_ae_array) // put effect val eff_array = Cat(Fill(nPhysicalEntries, prot_eff), normal_entries.map(_.eff).asUInt) // cacheable val c_array = Cat(Fill(nPhysicalEntries, cacheable), normal_entries.map(_.c).asUInt) // put partial val ppp_array = Cat(Fill(nPhysicalEntries, prot_pp), normal_entries.map(_.ppp).asUInt) // atomic arithmetic val paa_array = Cat(Fill(nPhysicalEntries, prot_aa), normal_entries.map(_.paa).asUInt) // atomic logic val pal_array = Cat(Fill(nPhysicalEntries, prot_al), normal_entries.map(_.pal).asUInt) val ppp_array_if_cached = ppp_array | c_array val paa_array_if_cached = paa_array | (if(usingAtomicsInCache) c_array else 0.U) val pal_array_if_cached = pal_array | (if(usingAtomicsInCache) c_array else 0.U) val prefetchable_array = Cat((cacheable && homogeneous) << (nPhysicalEntries-1), normal_entries.map(_.c).asUInt) // vaddr misaligned: vaddr[1:0]=b00 val misaligned = (io.req.bits.vaddr & (UIntToOH(io.req.bits.size) - 1.U)).orR def badVA(guestPA: Boolean): Bool = { val additionalPgLevels = (if (guestPA) io.ptw.hgatp else satp).additionalPgLevels val extraBits = if (guestPA) hypervisorExtraAddrBits else 0 val signed = !guestPA val nPgLevelChoices = pgLevels - minPgLevels + 1 val minVAddrBits = pgIdxBits + minPgLevels * pgLevelBits + extraBits (for (i <- 0 until nPgLevelChoices) yield { val mask = ((BigInt(1) << vaddrBitsExtended) - (BigInt(1) << (minVAddrBits + i * pgLevelBits - signed.toInt))).U val maskedVAddr = io.req.bits.vaddr & mask additionalPgLevels === i.U && !(maskedVAddr === 0.U || signed.B && maskedVAddr === mask) }).orR } val bad_gpa = if (!usingHypervisor) false.B else vm_enabled && !stage1_en && badVA(true) val bad_va = if (!usingVM || (minPgLevels == pgLevels && vaddrBits == vaddrBitsExtended)) false.B else vm_enabled && stage1_en && badVA(false) val cmd_lrsc = usingAtomics.B && io.req.bits.cmd.isOneOf(M_XLR, M_XSC) val cmd_amo_logical = usingAtomics.B && isAMOLogical(io.req.bits.cmd) val cmd_amo_arithmetic = usingAtomics.B && isAMOArithmetic(io.req.bits.cmd) val cmd_put_partial = io.req.bits.cmd === M_PWR val cmd_read = isRead(io.req.bits.cmd) val cmd_readx = usingHypervisor.B && io.req.bits.cmd === M_HLVX val cmd_write = isWrite(io.req.bits.cmd) val cmd_write_perms = cmd_write || io.req.bits.cmd.isOneOf(M_FLUSH_ALL, M_WOK) // not a write, but needs write permissions val lrscAllowed = Mux((usingDataScratchpad || usingAtomicsOnlyForIO).B, 0.U, c_array) val ae_array = Mux(misaligned, eff_array, 0.U) | Mux(cmd_lrsc, ~lrscAllowed, 0.U) // access exception needs SoC information from PMA val ae_ld_array = Mux(cmd_read, ae_array | ~pr_array, 0.U) val ae_st_array = Mux(cmd_write_perms, ae_array | ~pw_array, 0.U) | Mux(cmd_put_partial, ~ppp_array_if_cached, 0.U) | Mux(cmd_amo_logical, ~pal_array_if_cached, 0.U) | Mux(cmd_amo_arithmetic, ~paa_array_if_cached, 0.U) val must_alloc_array = Mux(cmd_put_partial, ~ppp_array, 0.U) | Mux(cmd_amo_logical, ~pal_array, 0.U) | Mux(cmd_amo_arithmetic, ~paa_array, 0.U) | Mux(cmd_lrsc, ~0.U(pal_array.getWidth.W), 0.U) val pf_ld_array = Mux(cmd_read, 
((~Mux(cmd_readx, x_array, r_array) & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U) val pf_st_array = Mux(cmd_write_perms, ((~w_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U) val pf_inst_array = ((~x_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array val gf_ld_array = Mux(priv_v && cmd_read, (~Mux(cmd_readx, hx_array, hr_array) | ptw_gf_array) & ~ptw_ae_array, 0.U) val gf_st_array = Mux(priv_v && cmd_write_perms, (~hw_array | ptw_gf_array) & ~ptw_ae_array, 0.U) val gf_inst_array = Mux(priv_v, (~hx_array | ptw_gf_array) & ~ptw_ae_array, 0.U) val gpa_hits = { val need_gpa_mask = if (instruction) gf_inst_array else gf_ld_array | gf_st_array val hit_mask = Fill(ordinary_entries.size, r_gpa_valid && r_gpa_vpn === vpn) | Fill(all_entries.size, !vstage1_en) hit_mask | ~need_gpa_mask(all_entries.size-1, 0) } val tlb_hit_if_not_gpa_miss = real_hits.orR val tlb_hit = (real_hits & gpa_hits).orR // leads to s_request val tlb_miss = vm_enabled && !vsatp_mode_mismatch && !bad_va && !tlb_hit val sectored_plru = new SetAssocLRU(cfg.nSets, sectored_entries.head.size, "plru") val superpage_plru = new PseudoLRU(superpage_entries.size) when (io.req.valid && vm_enabled) { // replace when (sector_hits.orR) { sectored_plru.access(memIdx, OHToUInt(sector_hits)) } when (superpage_hits.orR) { superpage_plru.access(OHToUInt(superpage_hits)) } } // Superpages create the possibility that two entries in the TLB may match. // This corresponds to a software bug, but we can't return complete garbage; // we must return either the old translation or the new translation. This // isn't compatible with the Mux1H approach. So, flush the TLB and report // a miss on duplicate entries. val multipleHits = PopCountAtLeast(real_hits, 2) // only pull up req.ready when this is s_ready state. 
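// A consequence: the TLB handles at most one outstanding miss. While a refill is in flight
// the state machine leaves s_ready, so new requests are held off until the walk completes,
// is killed, or is invalidated by SFENCE.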
io.req.ready := state === s_ready // page fault io.resp.pf.ld := (bad_va && cmd_read) || (pf_ld_array & hits).orR io.resp.pf.st := (bad_va && cmd_write_perms) || (pf_st_array & hits).orR io.resp.pf.inst := bad_va || (pf_inst_array & hits).orR // guest page fault io.resp.gf.ld := (bad_gpa && cmd_read) || (gf_ld_array & hits).orR io.resp.gf.st := (bad_gpa && cmd_write_perms) || (gf_st_array & hits).orR io.resp.gf.inst := bad_gpa || (gf_inst_array & hits).orR // access exception io.resp.ae.ld := (ae_ld_array & hits).orR io.resp.ae.st := (ae_st_array & hits).orR io.resp.ae.inst := (~px_array & hits).orR // misaligned io.resp.ma.ld := misaligned && cmd_read io.resp.ma.st := misaligned && cmd_write io.resp.ma.inst := false.B // this is up to the pipeline to figure out io.resp.cacheable := (c_array & hits).orR io.resp.must_alloc := (must_alloc_array & hits).orR io.resp.prefetchable := (prefetchable_array & hits).orR && edge.manager.managers.forall(m => !m.supportsAcquireB || m.supportsHint).B io.resp.miss := do_refill || vsatp_mode_mismatch || tlb_miss || multipleHits io.resp.paddr := Cat(ppn, io.req.bits.vaddr(pgIdxBits-1, 0)) io.resp.size := io.req.bits.size io.resp.cmd := io.req.bits.cmd io.resp.gpa_is_pte := vstage1_en && r_gpa_is_pte io.resp.gpa := { val page = Mux(!vstage1_en, Cat(bad_gpa, vpn), r_gpa >> pgIdxBits) val offset = Mux(io.resp.gpa_is_pte, r_gpa(pgIdxBits-1, 0), io.req.bits.vaddr(pgIdxBits-1, 0)) Cat(page, offset) } io.ptw.req.valid := state === s_request io.ptw.req.bits.valid := !io.kill io.ptw.req.bits.bits.addr := r_refill_tag io.ptw.req.bits.bits.vstage1 := r_vstage1_en io.ptw.req.bits.bits.stage2 := r_stage2_en io.ptw.req.bits.bits.need_gpa := r_need_gpa if (usingVM) { when(io.ptw.req.fire && io.ptw.req.bits.valid) { r_gpa_valid := false.B r_gpa_vpn := r_refill_tag } val sfence = io.sfence.valid // this is [[s_ready]] // handle miss/hit at the first cycle. // if miss, request PTW(L2TLB). when (io.req.fire && tlb_miss) { state := s_request r_refill_tag := vpn r_need_gpa := tlb_hit_if_not_gpa_miss r_vstage1_en := vstage1_en r_stage2_en := stage2_en r_superpage_repl_addr := replacementEntry(superpage_entries, superpage_plru.way) r_sectored_repl_addr := replacementEntry(sectored_entries(memIdx), sectored_plru.way(memIdx)) r_sectored_hit.valid := sector_hits.orR r_sectored_hit.bits := OHToUInt(sector_hits) r_superpage_hit.valid := superpage_hits.orR r_superpage_hit.bits := OHToUInt(superpage_hits) } // Handle SFENCE.VMA when send request to PTW. // SFENCE.VMA io.ptw.req.ready kill // ? ? 1 // 0 0 0 // 0 1 0 -> s_wait // 1 0 0 -> s_wait_invalidate // 1 0 0 -> s_ready when (state === s_request) { // SFENCE.VMA will kill TLB entries based on rs1 and rs2. It will take 1 cycle. when (sfence) { state := s_ready } // here should be io.ptw.req.fire, but assert(io.ptw.req.ready === true.B) // fire -> s_wait when (io.ptw.req.ready) { state := Mux(sfence, s_wait_invalidate, s_wait) } // If CPU kills request(frontend.s2_redirect) when (io.kill) { state := s_ready } } // sfence in refill will results in invalidate when (state === s_wait && sfence) { state := s_wait_invalidate } // after CPU acquire response, go back to s_ready. when (io.ptw.resp.valid) { state := s_ready } // SFENCE processing logic. 
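// SFENCE.VMA decode, applied to every entry in the loop below:
//   rs1 set             -> invalidate only entries matching the supplied VPN
//   rs2 set (rs1 clear) -> invalidate non-global entries (ASID-targeted flush)
//   neither             -> invalidate everything
// hv/hg mark the hypervisor fence forms (HFENCE.VVMA / HFENCE.GVMA); when hg is set the
// per-VPN/per-ASID filtering is skipped and the .otherwise arm performs the invalidation.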
when (sfence) { assert(!io.sfence.bits.rs1 || (io.sfence.bits.addr >> pgIdxBits) === vpn) for (e <- all_real_entries) { val hv = usingHypervisor.B && io.sfence.bits.hv val hg = usingHypervisor.B && io.sfence.bits.hg when (!hg && io.sfence.bits.rs1) { e.invalidateVPN(vpn, hv) } .elsewhen (!hg && io.sfence.bits.rs2) { e.invalidateNonGlobal(hv) } .otherwise { e.invalidate(hv || hg) } } } when(io.req.fire && vsatp_mode_mismatch) { all_real_entries.foreach(_.invalidate(true.B)) v_entries_use_stage1 := vstage1_en } when (multipleHits || reset.asBool) { all_real_entries.foreach(_.invalidate()) } ccover(io.ptw.req.fire, "MISS", "TLB miss") ccover(io.ptw.req.valid && !io.ptw.req.ready, "PTW_STALL", "TLB miss, but PTW busy") ccover(state === s_wait_invalidate, "SFENCE_DURING_REFILL", "flush TLB during TLB refill") ccover(sfence && !io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_ALL", "flush TLB") ccover(sfence && !io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_ASID", "flush TLB ASID") ccover(sfence && io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_LINE", "flush TLB line") ccover(sfence && io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_LINE_ASID", "flush TLB line/ASID") ccover(multipleHits, "MULTIPLE_HITS", "Two matching translations in TLB") } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = property.cover(cond, s"${if (instruction) "I" else "D"}TLB_$label", "MemorySystem;;" + desc) /** Decides which entry to replace * * If there is an invalid entry, pick it with a priority encoder; * if not, replace the alt entry chosen by the replacement policy * * @return index of the TLBEntry to replace */ def replacementEntry(set: Seq[TLBEntry], alt: UInt) = { val valids = set.map(_.valid.orR).asUInt Mux(valids.andR, alt, PriorityEncoder(~valids)) } } File TLBPermissions.scala: // See LICENSE.SiFive for license details.
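// TLBPageLookup summarises the diplomatic memory map at page granularity: for a physical
// address it reports fixed permissions (r/w/x/cacheable/atomic) plus a `homogeneous` flag
// meaning the whole page shares those attributes. In this file set only `homogeneous` is
// consumed (the TLB's prefetchable check and the PTW's superpage-fragmentation logic);
// per-access permissions come from the PMA/PMP checkers.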
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes, RegionType, AddressDecoder} import freechips.rocketchip.tilelink.TLManagerParameters case class TLBPermissions( homogeneous: Bool, // if false, the below are undefined r: Bool, // readable w: Bool, // writeable x: Bool, // executable c: Bool, // cacheable a: Bool, // arithmetic ops l: Bool) // logical ops object TLBPageLookup { private case class TLBFixedPermissions( e: Boolean, // get-/put-effects r: Boolean, // readable w: Boolean, // writeable x: Boolean, // executable c: Boolean, // cacheable a: Boolean, // arithmetic ops l: Boolean) { // logical ops val useful = r || w || x || c || a || l } private def groupRegions(managers: Seq[TLManagerParameters]): Map[TLBFixedPermissions, Seq[AddressSet]] = { val permissions = managers.map { m => (m.address, TLBFixedPermissions( e = Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains m.regionType, r = m.supportsGet || m.supportsAcquireB, // if cached, never uses Get w = m.supportsPutFull || m.supportsAcquireT, // if cached, never uses Put x = m.executable, c = m.supportsAcquireB, a = m.supportsArithmetic, l = m.supportsLogical)) } permissions .filter(_._2.useful) // get rid of no-permission devices .groupBy(_._2) // group by permission type .mapValues(seq => AddressSet.unify(seq.flatMap(_._1))) // coalesce same-permission regions .toMap } // Unmapped memory is considered to be inhomogeneous def apply(managers: Seq[TLManagerParameters], xLen: Int, cacheBlockBytes: Int, pageSize: BigInt, maxRequestBytes: Int): UInt => TLBPermissions = { require (isPow2(xLen) && xLen >= 8) require (isPow2(cacheBlockBytes) && cacheBlockBytes >= xLen/8) require (isPow2(pageSize) && pageSize >= cacheBlockBytes) val xferSizes = TransferSizes(cacheBlockBytes, cacheBlockBytes) val allSizes = TransferSizes(1, maxRequestBytes) val amoSizes = TransferSizes(4, xLen/8) val permissions = managers.foreach { m => require (!m.supportsGet || m.supportsGet .contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsGet} Get, but must support ${allSizes}") require (!m.supportsPutFull || m.supportsPutFull .contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsPutFull} PutFull, but must support ${allSizes}") require (!m.supportsPutPartial || m.supportsPutPartial.contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsPutPartial} PutPartial, but must support ${allSizes}") require (!m.supportsAcquireB || m.supportsAcquireB .contains(xferSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsAcquireB} AcquireB, but must support ${xferSizes}") require (!m.supportsAcquireT || m.supportsAcquireT .contains(xferSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsAcquireT} AcquireT, but must support ${xferSizes}") require (!m.supportsLogical || m.supportsLogical .contains(amoSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsLogical} Logical, but must support ${amoSizes}") require (!m.supportsArithmetic || m.supportsArithmetic.contains(amoSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsArithmetic} Arithmetic, but must support ${amoSizes}") require (!(m.supportsAcquireB && m.supportsPutFull && !m.supportsAcquireT), s"Memory region '${m.name}' supports AcquireB (cached read) and PutFull (un-cached write) but not AcquireT (cached write)") } val 
grouped = groupRegions(managers) .mapValues(_.filter(_.alignment >= pageSize)) // discard any region that's not big enough def lowCostProperty(prop: TLBFixedPermissions => Boolean): UInt => Bool = { val (yesm, nom) = grouped.partition { case (k, eq) => prop(k) } val (yes, no) = (yesm.values.flatten.toList, nom.values.flatten.toList) // Find the minimal bits needed to distinguish between yes and no val decisionMask = AddressDecoder(Seq(yes, no)) def simplify(x: Seq[AddressSet]) = AddressSet.unify(x.map(_.widen(~decisionMask)).distinct) val (yesf, nof) = (simplify(yes), simplify(no)) if (yesf.size < no.size) { (x: UInt) => yesf.map(_.contains(x)).foldLeft(false.B)(_ || _) } else { (x: UInt) => !nof.map(_.contains(x)).foldLeft(false.B)(_ || _) } } // Derive simplified property circuits (don't care when !homo) val rfn = lowCostProperty(_.r) val wfn = lowCostProperty(_.w) val xfn = lowCostProperty(_.x) val cfn = lowCostProperty(_.c) val afn = lowCostProperty(_.a) val lfn = lowCostProperty(_.l) val homo = AddressSet.unify(grouped.values.flatten.toList) (x: UInt) => TLBPermissions( homogeneous = homo.map(_.contains(x)).foldLeft(false.B)(_ || _), r = rfn(x), w = wfn(x), x = xfn(x), c = cfn(x), a = afn(x), l = lfn(x)) } // Are all pageSize intervals of mapped regions homogeneous? def homogeneous(managers: Seq[TLManagerParameters], pageSize: BigInt): Boolean = { groupRegions(managers).values.forall(_.forall(_.alignment >= pageSize)) } } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = 
largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
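// e.g: AddressSet(0x1000, 0xfff).contains(0x1234) is true ((0x1234 ^ 0x1000) & ~0xfff == 0),
//      contains(0x2000) is false; alignment is 0x1000 and the set is contiguous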
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File PTW.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
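// PTW.scala defines the TLB<->PTW request/response bundles (PTWReq, PTWResp, TLBPTWIO,
// DatapathPTWIO), the PTE format helpers, and the page-table walker itself with its
// optional L2 TLB and PTE caches.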
package freechips.rocketchip.rocket import chisel3._ import chisel3.util.{Arbiter, Cat, Decoupled, Enum, Mux1H, OHToUInt, PopCount, PriorityEncoder, PriorityEncoderOH, RegEnable, UIntToOH, Valid, is, isPow2, log2Ceil, switch} import chisel3.withClock import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.subsystem.CacheBlockBytes import freechips.rocketchip.tile._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property import scala.collection.mutable.ListBuffer /** PTE request from TLB to PTW * * TLB send a PTE request to PTW when L1TLB miss */ class PTWReq(implicit p: Parameters) extends CoreBundle()(p) { val addr = UInt(vpnBits.W) val need_gpa = Bool() val vstage1 = Bool() val stage2 = Bool() } /** PTE info from L2TLB to TLB * * containing: target PTE, exceptions, two-satge tanslation info */ class PTWResp(implicit p: Parameters) extends CoreBundle()(p) { /** ptw access exception */ val ae_ptw = Bool() /** final access exception */ val ae_final = Bool() /** page fault */ val pf = Bool() /** guest page fault */ val gf = Bool() /** hypervisor read */ val hr = Bool() /** hypervisor write */ val hw = Bool() /** hypervisor execute */ val hx = Bool() /** PTE to refill L1TLB * * source: L2TLB */ val pte = new PTE /** pte pglevel */ val level = UInt(log2Ceil(pgLevels).W) /** fragmented_superpage support */ val fragmented_superpage = Bool() /** homogeneous for both pma and pmp */ val homogeneous = Bool() val gpa = Valid(UInt(vaddrBits.W)) val gpa_is_pte = Bool() } /** IO between TLB and PTW * * PTW receives : * - PTE request * - CSRs info * - pmp results from PMP(in TLB) */ class TLBPTWIO(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val req = Decoupled(Valid(new PTWReq)) val resp = Flipped(Valid(new PTWResp)) val ptbr = Input(new PTBR()) val hgatp = Input(new PTBR()) val vsatp = Input(new PTBR()) val status = Input(new MStatus()) val hstatus = Input(new HStatus()) val gstatus = Input(new MStatus()) val pmp = Input(Vec(nPMPs, new PMP)) val customCSRs = Flipped(coreParams.customCSRs) } /** PTW performance statistics */ class PTWPerfEvents extends Bundle { val l2miss = Bool() val l2hit = Bool() val pte_miss = Bool() val pte_hit = Bool() } /** Datapath IO between PTW and Core * * PTW receives CSRs info, pmp checks, sfence instruction info * * PTW sends its performance statistics to core */ class DatapathPTWIO(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val ptbr = Input(new PTBR()) val hgatp = Input(new PTBR()) val vsatp = Input(new PTBR()) val sfence = Flipped(Valid(new SFenceReq)) val status = Input(new MStatus()) val hstatus = Input(new HStatus()) val gstatus = Input(new MStatus()) val pmp = Input(Vec(nPMPs, new PMP)) val perf = Output(new PTWPerfEvents()) val customCSRs = Flipped(coreParams.customCSRs) /** enable clock generated by ptw */ val clock_enabled = Output(Bool()) } /** PTE template for transmission * * contains useful methods to check PTE attributes * @see RV-priv spec 4.3.1 for pgae table entry format */ class PTE(implicit p: Parameters) extends CoreBundle()(p) { val reserved_for_future = UInt(10.W) val ppn = UInt(44.W) val reserved_for_software = Bits(2.W) /** dirty bit */ val d = Bool() /** access bit */ val a = Bool() /** global mapping */ val g = Bool() /** user mode accessible */ val u = Bool() /** whether the page is executable */ val x = Bool() /** whether the page is writable */ val w = Bool() /** whether 
the page is readable */ val r = Bool() /** valid bit */ val v = Bool() /** return true if find a pointer to next level page table */ def table(dummy: Int = 0) = v && !r && !w && !x && !d && !a && !u && reserved_for_future === 0.U /** return true if find a leaf PTE */ def leaf(dummy: Int = 0) = v && (r || (x && !w)) && a /** user read */ def ur(dummy: Int = 0) = sr() && u /** user write*/ def uw(dummy: Int = 0) = sw() && u /** user execute */ def ux(dummy: Int = 0) = sx() && u /** supervisor read */ def sr(dummy: Int = 0) = leaf() && r /** supervisor write */ def sw(dummy: Int = 0) = leaf() && w && d /** supervisor execute */ def sx(dummy: Int = 0) = leaf() && x /** full permission: writable and executable in user mode */ def isFullPerm(dummy: Int = 0) = uw() && ux() } /** L2TLB PTE template * * contains tag bits * @param nSets number of sets in L2TLB * @see RV-priv spec 4.3.1 for page table entry format */ class L2TLBEntry(nSets: Int)(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val idxBits = log2Ceil(nSets) val tagBits = maxSVAddrBits - pgIdxBits - idxBits + (if (usingHypervisor) 1 else 0) val tag = UInt(tagBits.W) val ppn = UInt(ppnBits.W) /** dirty bit */ val d = Bool() /** access bit */ val a = Bool() /** user mode accessible */ val u = Bool() /** whether the page is executable */ val x = Bool() /** whether the page is writable */ val w = Bool() /** whether the page is readable */ val r = Bool() } /** PTW contains L2TLB, and performs page table walk for high level TLB, and cache queries from L1 TLBs(I$, D$, RoCC) * * It performs hierarchy page table query to mem for the desired leaf PTE and cache them in l2tlb. * Besides leaf PTEs, it also caches non-leaf PTEs in pte_cache to accerlerate the process. * * ==Structure== * - l2tlb : for leaf PTEs * - set-associative (configurable with [[CoreParams.nL2TLBEntries]]and [[CoreParams.nL2TLBWays]])) * - PLRU * - pte_cache: for non-leaf PTEs * - set-associative * - LRU * - s2_pte_cache: for non-leaf PTEs in 2-stage translation * - set-associative * - PLRU * * l2tlb Pipeline: 3 stage * {{{ * stage 0 : read * stage 1 : decode * stage 2 : hit check * }}} * ==State Machine== * s_ready: ready to reveive request from TLB * s_req: request mem; pte_cache hit judge * s_wait1: deal with l2tlb error * s_wait2: final hit judge * s_wait3: receive mem response * s_fragment_superpage: for superpage PTE * * @note l2tlb hit happens in s_req or s_wait1 * @see RV-priv spec 4.3-4.6 for Virtual-Memory System * @see RV-priv spec 8.5 for Two-Stage Address Translation * @todo details in two-stage translation */ class PTW(n: Int)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) { val io = IO(new Bundle { /** to n TLB */ val requestor = Flipped(Vec(n, new TLBPTWIO)) /** to HellaCache */ val mem = new HellaCacheIO /** to Core * * contains CSRs info and performance statistics */ val dpath = new DatapathPTWIO }) val s_ready :: s_req :: s_wait1 :: s_dummy1 :: s_wait2 :: s_wait3 :: s_dummy2 :: s_fragment_superpage :: Nil = Enum(8) val state = RegInit(s_ready) val l2_refill_wire = Wire(Bool()) /** Arbiter to arbite request from n TLB */ val arb = Module(new Arbiter(Valid(new PTWReq), n)) // use TLB req as arbitor's input arb.io.in <> io.requestor.map(_.req) // receive req only when s_ready and not in refill arb.io.out.ready := (state === s_ready) && !l2_refill_wire val resp_valid = RegNext(VecInit(Seq.fill(io.requestor.size)(false.B))) val clock_en = state =/= s_ready || l2_refill_wire || arb.io.out.valid || io.dpath.sfence.valid || 
io.dpath.customCSRs.disableDCacheClockGate io.dpath.clock_enabled := usingVM.B && clock_en val gated_clock = if (!usingVM || !tileParams.dcache.get.clockGate) clock else ClockGate(clock, clock_en, "ptw_clock_gate") withClock (gated_clock) { // entering gated-clock domain val invalidated = Reg(Bool()) /** current PTE level * {{{ * 0 <= count <= pgLevel-1 * count = pgLevel - 1 : leaf PTE * count < pgLevel - 1 : non-leaf PTE * }}} */ val count = Reg(UInt(log2Ceil(pgLevels).W)) val resp_ae_ptw = Reg(Bool()) val resp_ae_final = Reg(Bool()) val resp_pf = Reg(Bool()) val resp_gf = Reg(Bool()) val resp_hr = Reg(Bool()) val resp_hw = Reg(Bool()) val resp_hx = Reg(Bool()) val resp_fragmented_superpage = Reg(Bool()) /** tlb request */ val r_req = Reg(new PTWReq) /** current selected way in arbitor */ val r_req_dest = Reg(Bits()) // to respond to L1TLB : l2_hit // to construct mem.req.addr val r_pte = Reg(new PTE) val r_hgatp = Reg(new PTBR) // 2-stage pageLevel val aux_count = Reg(UInt(log2Ceil(pgLevels).W)) /** pte for 2-stage translation */ val aux_pte = Reg(new PTE) val gpa_pgoff = Reg(UInt(pgIdxBits.W)) // only valid in resp_gf case val stage2 = Reg(Bool()) val stage2_final = Reg(Bool()) val satp = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp, io.dpath.ptbr) val r_hgatp_initial_count = pgLevels.U - minPgLevels.U - r_hgatp.additionalPgLevels /** 2-stage translation both enable */ val do_both_stages = r_req.vstage1 && r_req.stage2 val max_count = count max aux_count val vpn = Mux(r_req.vstage1 && stage2, aux_pte.ppn, r_req.addr) val mem_resp_valid = RegNext(io.mem.resp.valid) val mem_resp_data = RegNext(io.mem.resp.bits.data) io.mem.uncached_resp.map { resp => assert(!(resp.valid && io.mem.resp.valid)) resp.ready := true.B when (resp.valid) { mem_resp_valid := true.B mem_resp_data := resp.bits.data } } // construct pte from mem.resp val (pte, invalid_paddr, invalid_gpa) = { val tmp = mem_resp_data.asTypeOf(new PTE()) val res = WireDefault(tmp) res.ppn := Mux(do_both_stages && !stage2, tmp.ppn(vpnBits.min(tmp.ppn.getWidth)-1, 0), tmp.ppn(ppnBits-1, 0)) when (tmp.r || tmp.w || tmp.x) { // for superpage mappings, make sure PPN LSBs are zero for (i <- 0 until pgLevels-1) when (count <= i.U && tmp.ppn((pgLevels-1-i)*pgLevelBits-1, (pgLevels-2-i)*pgLevelBits) =/= 0.U) { res.v := false.B } } (res, Mux(do_both_stages && !stage2, (tmp.ppn >> vpnBits) =/= 0.U, (tmp.ppn >> ppnBits) =/= 0.U), do_both_stages && !stage2 && checkInvalidHypervisorGPA(r_hgatp, tmp.ppn)) } // find non-leaf PTE, need traverse val traverse = pte.table() && !invalid_paddr && !invalid_gpa && count < (pgLevels-1).U /** address send to mem for enquerry */ val pte_addr = if (!usingVM) 0.U else { val vpn_idxs = (0 until pgLevels).map { i => val width = pgLevelBits + (if (i <= pgLevels - minPgLevels) hypervisorExtraAddrBits else 0) (vpn >> (pgLevels - i - 1) * pgLevelBits)(width - 1, 0) } val mask = Mux(stage2 && count === r_hgatp_initial_count, ((1 << (hypervisorExtraAddrBits + pgLevelBits)) - 1).U, ((1 << pgLevelBits) - 1).U) val vpn_idx = vpn_idxs(count) & mask val raw_pte_addr = ((r_pte.ppn << pgLevelBits) | vpn_idx) << log2Ceil(xLen / 8) val size = if (usingHypervisor) vaddrBits else paddrBits //use r_pte.ppn as page table base address //use vpn slice as offset raw_pte_addr.apply(size.min(raw_pte_addr.getWidth) - 1, 0) } /** stage2_pte_cache input addr */ val stage2_pte_cache_addr = if (!usingHypervisor) 0.U else { val vpn_idxs = (0 until pgLevels - 1).map { i => (r_req.addr >> (pgLevels - i - 1) * pgLevelBits)(pgLevelBits - 1, 0) } 
val vpn_idx = vpn_idxs(aux_count) val raw_s2_pte_cache_addr = Cat(aux_pte.ppn, vpn_idx) << log2Ceil(xLen / 8) raw_s2_pte_cache_addr(vaddrBits.min(raw_s2_pte_cache_addr.getWidth) - 1, 0) } def makeFragmentedSuperpagePPN(ppn: UInt): Seq[UInt] = { (pgLevels-1 until 0 by -1).map(i => Cat(ppn >> (pgLevelBits*i), r_req.addr(((pgLevelBits*i) min vpnBits)-1, 0).padTo(pgLevelBits*i))) } /** PTECache caches non-leaf PTE * @param s2 true: 2-stage address translation */ def makePTECache(s2: Boolean): (Bool, UInt) = if (coreParams.nPTECacheEntries == 0) { (false.B, 0.U) } else { val plru = new PseudoLRU(coreParams.nPTECacheEntries) val valid = RegInit(0.U(coreParams.nPTECacheEntries.W)) val tags = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor) 1 + vaddrBits else paddrBits).W))) // not include full pte, only ppn val data = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor && s2) vpnBits else ppnBits).W))) val can_hit = if (s2) count === r_hgatp_initial_count && aux_count < (pgLevels-1).U && r_req.vstage1 && stage2 && !stage2_final else count < (pgLevels-1).U && Mux(r_req.vstage1, stage2, !r_req.stage2) val can_refill = if (s2) do_both_stages && !stage2 && !stage2_final else can_hit val tag = if (s2) Cat(true.B, stage2_pte_cache_addr.padTo(vaddrBits)) else Cat(r_req.vstage1, pte_addr.padTo(if (usingHypervisor) vaddrBits else paddrBits)) val hits = tags.map(_ === tag).asUInt & valid val hit = hits.orR && can_hit // refill with mem response when (mem_resp_valid && traverse && can_refill && !hits.orR && !invalidated) { val r = Mux(valid.andR, plru.way, PriorityEncoder(~valid)) valid := valid | UIntToOH(r) tags(r) := tag data(r) := pte.ppn plru.access(r) } // replace when (hit && state === s_req) { plru.access(OHToUInt(hits)) } when (io.dpath.sfence.valid && (!io.dpath.sfence.bits.rs1 || usingHypervisor.B && io.dpath.sfence.bits.hg)) { valid := 0.U } val lcount = if (s2) aux_count else count for (i <- 0 until pgLevels-1) { ccover(hit && state === s_req && lcount === i.U, s"PTE_CACHE_HIT_L$i", s"PTE cache hit, level $i") } (hit, Mux1H(hits, data)) } // generate pte_cache val (pte_cache_hit, pte_cache_data) = makePTECache(false) // generate pte_cache with 2-stage translation val (stage2_pte_cache_hit, stage2_pte_cache_data) = makePTECache(true) // pte_cache hit or 2-stage pte_cache hit val pte_hit = RegNext(false.B) io.dpath.perf.pte_miss := false.B io.dpath.perf.pte_hit := pte_hit && (state === s_req) && !io.dpath.perf.l2hit assert(!(io.dpath.perf.l2hit && (io.dpath.perf.pte_miss || io.dpath.perf.pte_hit)), "PTE Cache Hit/Miss Performance Monitor Events are lower priority than L2TLB Hit event") // l2_refill happens when find the leaf pte val l2_refill = RegNext(false.B) l2_refill_wire := l2_refill io.dpath.perf.l2miss := false.B io.dpath.perf.l2hit := false.B // l2tlb val (l2_hit, l2_error, l2_pte, l2_tlb_ram) = if (coreParams.nL2TLBEntries == 0) (false.B, false.B, WireDefault(0.U.asTypeOf(new PTE)), None) else { val code = new ParityCode require(isPow2(coreParams.nL2TLBEntries)) require(isPow2(coreParams.nL2TLBWays)) require(coreParams.nL2TLBEntries >= coreParams.nL2TLBWays) val nL2TLBSets = coreParams.nL2TLBEntries / coreParams.nL2TLBWays require(isPow2(nL2TLBSets)) val idxBits = log2Ceil(nL2TLBSets) val l2_plru = new SetAssocLRU(nL2TLBSets, coreParams.nL2TLBWays, "plru") val ram = DescribedSRAM( name = "l2_tlb_ram", desc = "L2 TLB", size = nL2TLBSets, data = Vec(coreParams.nL2TLBWays, UInt(code.width(new L2TLBEntry(nL2TLBSets).getWidth).W)) ) val g = 
Reg(Vec(coreParams.nL2TLBWays, UInt(nL2TLBSets.W))) val valid = RegInit(VecInit(Seq.fill(coreParams.nL2TLBWays)(0.U(nL2TLBSets.W)))) // use r_req to construct tag val (r_tag, r_idx) = Split(Cat(r_req.vstage1, r_req.addr(maxSVAddrBits-pgIdxBits-1, 0)), idxBits) /** the valid vec for the selected set(including n ways) */ val r_valid_vec = valid.map(_(r_idx)).asUInt val r_valid_vec_q = Reg(UInt(coreParams.nL2TLBWays.W)) val r_l2_plru_way = Reg(UInt(log2Ceil(coreParams.nL2TLBWays max 1).W)) r_valid_vec_q := r_valid_vec // replacement way r_l2_plru_way := (if (coreParams.nL2TLBWays > 1) l2_plru.way(r_idx) else 0.U) // refill with r_pte(leaf pte) when (l2_refill && !invalidated) { val entry = Wire(new L2TLBEntry(nL2TLBSets)) entry.ppn := r_pte.ppn entry.d := r_pte.d entry.a := r_pte.a entry.u := r_pte.u entry.x := r_pte.x entry.w := r_pte.w entry.r := r_pte.r entry.tag := r_tag // if all the way are valid, use plru to select one way to be replaced, // otherwise use PriorityEncoderOH to select one val wmask = if (coreParams.nL2TLBWays > 1) Mux(r_valid_vec_q.andR, UIntToOH(r_l2_plru_way, coreParams.nL2TLBWays), PriorityEncoderOH(~r_valid_vec_q)) else 1.U(1.W) ram.write(r_idx, VecInit(Seq.fill(coreParams.nL2TLBWays)(code.encode(entry.asUInt))), wmask.asBools) val mask = UIntToOH(r_idx) for (way <- 0 until coreParams.nL2TLBWays) { when (wmask(way)) { valid(way) := valid(way) | mask g(way) := Mux(r_pte.g, g(way) | mask, g(way) & ~mask) } } } // sfence happens when (io.dpath.sfence.valid) { val hg = usingHypervisor.B && io.dpath.sfence.bits.hg for (way <- 0 until coreParams.nL2TLBWays) { valid(way) := Mux(!hg && io.dpath.sfence.bits.rs1, valid(way) & ~UIntToOH(io.dpath.sfence.bits.addr(idxBits+pgIdxBits-1, pgIdxBits)), Mux(!hg && io.dpath.sfence.bits.rs2, valid(way) & g(way), 0.U)) } } val s0_valid = !l2_refill && arb.io.out.fire val s0_suitable = arb.io.out.bits.bits.vstage1 === arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.need_gpa val s1_valid = RegNext(s0_valid && s0_suitable && arb.io.out.bits.valid) val s2_valid = RegNext(s1_valid) // read from tlb idx val s1_rdata = ram.read(arb.io.out.bits.bits.addr(idxBits-1, 0), s0_valid) val s2_rdata = s1_rdata.map(s1_rdway => code.decode(RegEnable(s1_rdway, s1_valid))) val s2_valid_vec = RegEnable(r_valid_vec, s1_valid) val s2_g_vec = RegEnable(VecInit(g.map(_(r_idx))), s1_valid) val s2_error = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && s2_rdata(way).error).orR when (s2_valid && s2_error) { valid.foreach { _ := 0.U }} // decode val s2_entry_vec = s2_rdata.map(_.uncorrected.asTypeOf(new L2TLBEntry(nL2TLBSets))) val s2_hit_vec = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && (r_tag === s2_entry_vec(way).tag)) val s2_hit = s2_valid && s2_hit_vec.orR io.dpath.perf.l2miss := s2_valid && !(s2_hit_vec.orR) io.dpath.perf.l2hit := s2_hit when (s2_hit) { l2_plru.access(r_idx, OHToUInt(s2_hit_vec)) assert((PopCount(s2_hit_vec) === 1.U) || s2_error, "L2 TLB multi-hit") } val s2_pte = Wire(new PTE) val s2_hit_entry = Mux1H(s2_hit_vec, s2_entry_vec) s2_pte.ppn := s2_hit_entry.ppn s2_pte.d := s2_hit_entry.d s2_pte.a := s2_hit_entry.a s2_pte.g := Mux1H(s2_hit_vec, s2_g_vec) s2_pte.u := s2_hit_entry.u s2_pte.x := s2_hit_entry.x s2_pte.w := s2_hit_entry.w s2_pte.r := s2_hit_entry.r s2_pte.v := true.B s2_pte.reserved_for_future := 0.U s2_pte.reserved_for_software := 0.U for (way <- 0 until coreParams.nL2TLBWays) { ccover(s2_hit && s2_hit_vec(way), s"L2_TLB_HIT_WAY$way", s"L2 TLB hit way$way") } (s2_hit, s2_error, s2_pte, 
Some(ram)) } // if SFENCE occurs during walk, don't refill PTE cache or L2 TLB until next walk invalidated := io.dpath.sfence.valid || (invalidated && state =/= s_ready) // mem request io.mem.keep_clock_enabled := false.B io.mem.req.valid := state === s_req || state === s_dummy1 io.mem.req.bits.phys := true.B io.mem.req.bits.cmd := M_XRD io.mem.req.bits.size := log2Ceil(xLen/8).U io.mem.req.bits.signed := false.B io.mem.req.bits.addr := pte_addr io.mem.req.bits.idx.foreach(_ := pte_addr) io.mem.req.bits.dprv := PRV.S.U // PTW accesses are S-mode by definition io.mem.req.bits.dv := do_both_stages && !stage2 io.mem.req.bits.tag := DontCare io.mem.req.bits.no_resp := false.B io.mem.req.bits.no_alloc := DontCare io.mem.req.bits.no_xcpt := DontCare io.mem.req.bits.data := DontCare io.mem.req.bits.mask := DontCare io.mem.s1_kill := l2_hit || (state =/= s_wait1) || resp_gf io.mem.s1_data := DontCare io.mem.s2_kill := false.B val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits) require(!usingHypervisor || pageGranularityPMPs, s"hypervisor requires pmpGranularity >= ${1<<pgIdxBits}") val pmaPgLevelHomogeneous = (0 until pgLevels) map { i => val pgSize = BigInt(1) << (pgIdxBits + ((pgLevels - 1 - i) * pgLevelBits)) if (pageGranularityPMPs && i == pgLevels - 1) { require(TLBPageLookup.homogeneous(edge.manager.managers, pgSize), s"All memory regions must be $pgSize-byte aligned") true.B } else { TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), pgSize, xLen/8)(r_pte.ppn << pgIdxBits).homogeneous } } val pmaHomogeneous = pmaPgLevelHomogeneous(count) val pmpHomogeneous = new PMPHomogeneityChecker(io.dpath.pmp).apply(r_pte.ppn << pgIdxBits, count) val homogeneous = pmaHomogeneous && pmpHomogeneous // response to tlb for (i <- 0 until io.requestor.size) { io.requestor(i).resp.valid := resp_valid(i) io.requestor(i).resp.bits.ae_ptw := resp_ae_ptw io.requestor(i).resp.bits.ae_final := resp_ae_final io.requestor(i).resp.bits.pf := resp_pf io.requestor(i).resp.bits.gf := resp_gf io.requestor(i).resp.bits.hr := resp_hr io.requestor(i).resp.bits.hw := resp_hw io.requestor(i).resp.bits.hx := resp_hx io.requestor(i).resp.bits.pte := r_pte io.requestor(i).resp.bits.level := max_count io.requestor(i).resp.bits.homogeneous := homogeneous || pageGranularityPMPs.B io.requestor(i).resp.bits.fragmented_superpage := resp_fragmented_superpage && pageGranularityPMPs.B io.requestor(i).resp.bits.gpa.valid := r_req.need_gpa io.requestor(i).resp.bits.gpa.bits := Cat(Mux(!stage2_final || !r_req.vstage1 || aux_count === (pgLevels - 1).U, aux_pte.ppn, makeFragmentedSuperpagePPN(aux_pte.ppn)(aux_count)), gpa_pgoff) io.requestor(i).resp.bits.gpa_is_pte := !stage2_final io.requestor(i).ptbr := io.dpath.ptbr io.requestor(i).hgatp := io.dpath.hgatp io.requestor(i).vsatp := io.dpath.vsatp io.requestor(i).customCSRs <> io.dpath.customCSRs io.requestor(i).status := io.dpath.status io.requestor(i).hstatus := io.dpath.hstatus io.requestor(i).gstatus := io.dpath.gstatus io.requestor(i).pmp := io.dpath.pmp } // control state machine val next_state = WireDefault(state) state := OptimizationBarrier(next_state) val do_switch = WireDefault(false.B) switch (state) { is (s_ready) { when (arb.io.out.fire) { val satp_initial_count = pgLevels.U - minPgLevels.U - satp.additionalPgLevels val vsatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.vsatp.additionalPgLevels val hgatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.hgatp.additionalPgLevels val aux_ppn = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp.ppn, 
arb.io.out.bits.bits.addr) r_req := arb.io.out.bits.bits r_req_dest := arb.io.chosen next_state := Mux(arb.io.out.bits.valid, s_req, s_ready) stage2 := arb.io.out.bits.bits.stage2 stage2_final := arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.vstage1 count := Mux(arb.io.out.bits.bits.stage2, hgatp_initial_count, satp_initial_count) aux_count := Mux(arb.io.out.bits.bits.vstage1, vsatp_initial_count, 0.U) aux_pte.ppn := aux_ppn aux_pte.reserved_for_future := 0.U resp_ae_ptw := false.B resp_ae_final := false.B resp_pf := false.B resp_gf := checkInvalidHypervisorGPA(io.dpath.hgatp, aux_ppn) && arb.io.out.bits.bits.stage2 resp_hr := true.B resp_hw := true.B resp_hx := true.B resp_fragmented_superpage := false.B r_hgatp := io.dpath.hgatp assert(!arb.io.out.bits.bits.need_gpa || arb.io.out.bits.bits.stage2) } } is (s_req) { when(stage2 && count === r_hgatp_initial_count) { gpa_pgoff := Mux(aux_count === (pgLevels-1).U, r_req.addr << (xLen/8).log2, stage2_pte_cache_addr) } // pte_cache hit when (stage2_pte_cache_hit) { aux_count := aux_count + 1.U aux_pte.ppn := stage2_pte_cache_data aux_pte.reserved_for_future := 0.U pte_hit := true.B }.elsewhen (pte_cache_hit) { count := count + 1.U pte_hit := true.B }.otherwise { next_state := Mux(io.mem.req.ready, s_wait1, s_req) } when(resp_gf) { next_state := s_ready resp_valid(r_req_dest) := true.B } } is (s_wait1) { // This Mux is for the l2_error case; the l2_hit && !l2_error case is overriden below next_state := Mux(l2_hit, s_req, s_wait2) } is (s_wait2) { next_state := s_wait3 io.dpath.perf.pte_miss := count < (pgLevels-1).U when (io.mem.s2_xcpt.ae.ld) { resp_ae_ptw := true.B next_state := s_ready resp_valid(r_req_dest) := true.B } } is (s_fragment_superpage) { next_state := s_ready resp_valid(r_req_dest) := true.B when (!homogeneous) { count := (pgLevels-1).U resp_fragmented_superpage := true.B } when (do_both_stages) { resp_fragmented_superpage := true.B } } } val merged_pte = { val superpage_masks = (0 until pgLevels).map(i => ((BigInt(1) << pte.ppn.getWidth) - (BigInt(1) << (pgLevels-1-i)*pgLevelBits)).U) val superpage_mask = superpage_masks(Mux(stage2_final, max_count, (pgLevels-1).U)) val stage1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), aux_pte.ppn((pgLevels-i-1)*pgLevelBits-1,0))) :+ pte.ppn val stage1_ppn = stage1_ppns(count) makePTE(stage1_ppn & superpage_mask, aux_pte) } r_pte := OptimizationBarrier( // l2tlb hit->find a leaf PTE(l2_pte), respond to L1TLB Mux(l2_hit && !l2_error && !resp_gf, l2_pte, // S2 PTE cache hit -> proceed to the next level of walking, update the r_pte with hgatp Mux(state === s_req && stage2_pte_cache_hit, makeHypervisorRootPTE(r_hgatp, stage2_pte_cache_data, l2_pte), // pte cache hit->find a non-leaf PTE(pte_cache),continue to request mem Mux(state === s_req && pte_cache_hit, makePTE(pte_cache_data, l2_pte), // 2-stage translation Mux(do_switch, makeHypervisorRootPTE(r_hgatp, pte.ppn, r_pte), // when mem respond, store mem.resp.pte Mux(mem_resp_valid, Mux(!traverse && r_req.vstage1 && stage2, merged_pte, pte), // fragment_superpage Mux(state === s_fragment_superpage && !homogeneous && count =/= (pgLevels - 1).U, makePTE(makeFragmentedSuperpagePPN(r_pte.ppn)(count), r_pte), // when tlb request come->request mem, use root address in satp(or vsatp,hgatp) Mux(arb.io.out.fire, Mux(arb.io.out.bits.bits.stage2, makeHypervisorRootPTE(io.dpath.hgatp, io.dpath.vsatp.ppn, r_pte), makePTE(satp.ppn, r_pte)), r_pte)))))))) when (l2_hit && !l2_error && !resp_gf) { 
assert(state === s_req || state === s_wait1) next_state := s_ready resp_valid(r_req_dest) := true.B count := (pgLevels-1).U } when (mem_resp_valid) { assert(state === s_wait3) next_state := s_req when (traverse) { when (do_both_stages && !stage2) { do_switch := true.B } count := count + 1.U }.otherwise { val gf = (stage2 && !stage2_final && !pte.ur()) || (pte.leaf() && pte.reserved_for_future === 0.U && invalid_gpa) val ae = pte.v && invalid_paddr val pf = pte.v && pte.reserved_for_future =/= 0.U val success = pte.v && !ae && !pf && !gf when (do_both_stages && !stage2_final && success) { when (stage2) { stage2 := false.B count := aux_count }.otherwise { stage2_final := true.B do_switch := true.B } }.otherwise { // find a leaf pte, start l2 refill l2_refill := success && count === (pgLevels-1).U && !r_req.need_gpa && (!r_req.vstage1 && !r_req.stage2 || do_both_stages && aux_count === (pgLevels-1).U && pte.isFullPerm()) count := max_count when (pageGranularityPMPs.B && !(count === (pgLevels-1).U && (!do_both_stages || aux_count === (pgLevels-1).U))) { next_state := s_fragment_superpage }.otherwise { next_state := s_ready resp_valid(r_req_dest) := true.B } resp_ae_ptw := ae && count < (pgLevels-1).U && pte.table() resp_ae_final := ae && pte.leaf() resp_pf := pf && !stage2 resp_gf := gf || (pf && stage2) resp_hr := !stage2 || (!pf && !gf && pte.ur()) resp_hw := !stage2 || (!pf && !gf && pte.uw()) resp_hx := !stage2 || (!pf && !gf && pte.ux()) } } } when (io.mem.s2_nack) { assert(state === s_wait2) next_state := s_req } when (do_switch) { aux_count := Mux(traverse, count + 1.U, count) count := r_hgatp_initial_count aux_pte := Mux(traverse, pte, { val s1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), r_req.addr(((pgLevels-i-1)*pgLevelBits min vpnBits)-1,0).padTo((pgLevels-i-1)*pgLevelBits))) :+ pte.ppn makePTE(s1_ppns(count), pte) }) stage2 := true.B } for (i <- 0 until pgLevels) { val leaf = mem_resp_valid && !traverse && count === i.U ccover(leaf && pte.v && !invalid_paddr && !invalid_gpa && pte.reserved_for_future === 0.U, s"L$i", s"successful page-table access, level $i") ccover(leaf && pte.v && invalid_paddr, s"L${i}_BAD_PPN_MSB", s"PPN too large, level $i") ccover(leaf && pte.v && invalid_gpa, s"L${i}_BAD_GPA_MSB", s"GPA too large, level $i") ccover(leaf && pte.v && pte.reserved_for_future =/= 0.U, s"L${i}_BAD_RSV_MSB", s"reserved MSBs set, level $i") ccover(leaf && !mem_resp_data(0), s"L${i}_INVALID_PTE", s"page not present, level $i") if (i != pgLevels-1) ccover(leaf && !pte.v && mem_resp_data(0), s"L${i}_BAD_PPN_LSB", s"PPN LSBs not zero, level $i") } ccover(mem_resp_valid && count === (pgLevels-1).U && pte.table(), s"TOO_DEEP", s"page table too deep") ccover(io.mem.s2_nack, "NACK", "D$ nacked page-table access") ccover(state === s_wait2 && io.mem.s2_xcpt.ae.ld, "AE", "access exception while walking page table") } // leaving gated-clock domain private def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = if (usingVM) property.cover(cond, s"PTW_$label", "MemorySystem;;" + desc) /** Relace PTE.ppn with ppn */ private def makePTE(ppn: UInt, default: PTE) = { val pte = WireDefault(default) pte.ppn := ppn pte } /** use hgatp and vpn to construct a new ppn */ private def makeHypervisorRootPTE(hgatp: PTBR, vpn: UInt, default: PTE) = { val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> (pgLevels-i)*pgLevelBits)) val lsbs = 
WireDefault(UInt(maxHypervisorExtraAddrBits.W), idxs(count)) val pte = WireDefault(default) pte.ppn := Cat(hgatp.ppn >> maxHypervisorExtraAddrBits, lsbs) pte } /** use hgatp and vpn to check for gpa out of range */ private def checkInvalidHypervisorGPA(hgatp: PTBR, vpn: UInt) = { val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> ((pgLevels-i)*pgLevelBits)+maxHypervisorExtraAddrBits)) idxs.extract(count) =/= 0.U } } /** Mix-ins for constructing tiles that might have a PTW */ trait CanHavePTW extends HasTileParameters with HasHellaCache { this: BaseTile => val module: CanHavePTWModule var nPTWPorts = 1 nDCachePorts += usingPTW.toInt } trait CanHavePTWModule extends HasHellaCacheModule { val outer: CanHavePTW val ptwPorts = ListBuffer(outer.dcache.module.io.ptw) val ptw = Module(new PTW(outer.nPTWPorts)(outer.dcache.node.edges.out(0), outer.p)) ptw.io.mem <> DontCare if (outer.usingPTW) { dcachePorts += ptw.io.mem } }
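A note on the root-PTE helpers above (a minimal, self-contained sketch, not part of the Chisel sources or of the generated Verilog that follows): makeHypervisorRootPTE selects a slice of the guest-physical page number to index the widened stage-2 root page table, and checkInvalidHypervisorGPA rejects GPAs whose bits above that slice are non-zero. The plain-Scala program below mirrors that index arithmetic with ordinary integers; the object and method names and the parameter values (pgLevels = 3, minPgLevels = 3, pgLevelBits = 9, maxHypervisorExtraAddrBits = 2, i.e. an Sv39x4-style configuration) are illustrative assumptions, not taken from the design above.

// Illustrative sketch of the hgatp root-index arithmetic; all names and parameters are assumed values.
object HypervisorRootPteSketch extends App {
  val pgLevels = 3                      // assumed: Sv39-style stage-1
  val minPgLevels = 3
  val pgLevelBits = 9
  val maxHypervisorExtraAddrBits = 2    // assumed: Sv39x4 widens the root index by 2 bits

  // Mirrors: count = pgLevels - minPgLevels - hgatp.additionalPgLevels
  //          idxs(i) = vpn >> (pgLevels - i) * pgLevelBits
  //          pte.ppn = Cat(hgatp.ppn >> maxHypervisorExtraAddrBits,
  //                        low maxHypervisorExtraAddrBits bits of idxs(count))
  def rootPpn(hgatpPpn: BigInt, gpaVpn: BigInt, additionalPgLevels: Int): BigInt = {
    val count = pgLevels - minPgLevels - additionalPgLevels
    val idxs  = (0 to pgLevels - minPgLevels).map(i => gpaVpn >> ((pgLevels - i) * pgLevelBits))
    val lsbs  = idxs(count) & ((BigInt(1) << maxHypervisorExtraAddrBits) - 1)
    ((hgatpPpn >> maxHypervisorExtraAddrBits) << maxHypervisorExtraAddrBits) | lsbs
  }

  // Mirrors checkInvalidHypervisorGPA: any GPA bits above the selected slice must be zero.
  def gpaOutOfRange(gpaVpn: BigInt, additionalPgLevels: Int): Boolean = {
    val count = pgLevels - minPgLevels - additionalPgLevels
    val idxs  = (0 to pgLevels - minPgLevels)
      .map(i => gpaVpn >> (((pgLevels - i) * pgLevelBits) + maxHypervisorExtraAddrBits))
    idxs(count) != BigInt(0)
  }

  // Example: a GPA whose two extra VPN bits are 0b10 indexes the third 4 KiB slice of the
  // 16 KiB root table, so the low two bits of hgatp.ppn are replaced by 0b10 (0x1000 -> 0x1002).
  val gpaVpn = BigInt(2) << (pgLevels * pgLevelBits)
  println("root ppn = 0x" + rootPpn(BigInt("1000", 16), gpaVpn, additionalPgLevels = 0).toString(16))
  println("out of range = " + gpaOutOfRange(gpaVpn, additionalPgLevels = 0))
}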
module DTLB_6( // @[TLB.scala:318:7] input clock, // @[TLB.scala:318:7] input reset, // @[TLB.scala:318:7] output io_req_ready, // @[TLB.scala:320:14] input io_req_valid, // @[TLB.scala:320:14] input [39:0] io_req_bits_vaddr, // @[TLB.scala:320:14] input [1:0] io_req_bits_size, // @[TLB.scala:320:14] input [4:0] io_req_bits_cmd, // @[TLB.scala:320:14] output io_resp_miss, // @[TLB.scala:320:14] output [31:0] io_resp_paddr, // @[TLB.scala:320:14] input io_sfence_valid, // @[TLB.scala:320:14] input io_ptw_req_ready, // @[TLB.scala:320:14] output io_ptw_req_valid, // @[TLB.scala:320:14] output [26:0] io_ptw_req_bits_bits_addr, // @[TLB.scala:320:14] output io_ptw_req_bits_bits_need_gpa, // @[TLB.scala:320:14] input io_ptw_resp_valid, // @[TLB.scala:320:14] input io_ptw_resp_bits_ae_ptw, // @[TLB.scala:320:14] input io_ptw_resp_bits_ae_final, // @[TLB.scala:320:14] input io_ptw_resp_bits_pf, // @[TLB.scala:320:14] input io_ptw_resp_bits_gf, // @[TLB.scala:320:14] input io_ptw_resp_bits_hr, // @[TLB.scala:320:14] input io_ptw_resp_bits_hw, // @[TLB.scala:320:14] input io_ptw_resp_bits_hx, // @[TLB.scala:320:14] input [9:0] io_ptw_resp_bits_pte_reserved_for_future, // @[TLB.scala:320:14] input [43:0] io_ptw_resp_bits_pte_ppn, // @[TLB.scala:320:14] input [1:0] io_ptw_resp_bits_pte_reserved_for_software, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_d, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_a, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_g, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_u, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_x, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_w, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_r, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_v, // @[TLB.scala:320:14] input [1:0] io_ptw_resp_bits_level, // @[TLB.scala:320:14] input io_ptw_resp_bits_homogeneous, // @[TLB.scala:320:14] input io_ptw_resp_bits_gpa_valid, // @[TLB.scala:320:14] input [38:0] io_ptw_resp_bits_gpa_bits, // @[TLB.scala:320:14] input io_ptw_resp_bits_gpa_is_pte, // @[TLB.scala:320:14] input [3:0] io_ptw_ptbr_mode, // @[TLB.scala:320:14] input [43:0] io_ptw_ptbr_ppn, // @[TLB.scala:320:14] input io_ptw_status_debug, // @[TLB.scala:320:14] input io_ptw_status_cease, // @[TLB.scala:320:14] input io_ptw_status_wfi, // @[TLB.scala:320:14] input [31:0] io_ptw_status_isa, // @[TLB.scala:320:14] input [1:0] io_ptw_status_dprv, // @[TLB.scala:320:14] input io_ptw_status_dv, // @[TLB.scala:320:14] input [1:0] io_ptw_status_prv, // @[TLB.scala:320:14] input io_ptw_status_v, // @[TLB.scala:320:14] input io_ptw_status_sd, // @[TLB.scala:320:14] input [22:0] io_ptw_status_zero2, // @[TLB.scala:320:14] input io_ptw_status_mpv, // @[TLB.scala:320:14] input io_ptw_status_gva, // @[TLB.scala:320:14] input io_ptw_status_mbe, // @[TLB.scala:320:14] input io_ptw_status_sbe, // @[TLB.scala:320:14] input [1:0] io_ptw_status_sxl, // @[TLB.scala:320:14] input [1:0] io_ptw_status_uxl, // @[TLB.scala:320:14] input io_ptw_status_sd_rv32, // @[TLB.scala:320:14] input [7:0] io_ptw_status_zero1, // @[TLB.scala:320:14] input io_ptw_status_tsr, // @[TLB.scala:320:14] input io_ptw_status_tw, // @[TLB.scala:320:14] input io_ptw_status_tvm, // @[TLB.scala:320:14] input io_ptw_status_mxr, // @[TLB.scala:320:14] input io_ptw_status_sum, // @[TLB.scala:320:14] input io_ptw_status_mprv, // @[TLB.scala:320:14] input [1:0] io_ptw_status_xs, // @[TLB.scala:320:14] input [1:0] io_ptw_status_fs, // @[TLB.scala:320:14] input [1:0] io_ptw_status_mpp, // @[TLB.scala:320:14] input [1:0] 
io_ptw_status_vs, // @[TLB.scala:320:14] input io_ptw_status_spp, // @[TLB.scala:320:14] input io_ptw_status_mpie, // @[TLB.scala:320:14] input io_ptw_status_ube, // @[TLB.scala:320:14] input io_ptw_status_spie, // @[TLB.scala:320:14] input io_ptw_status_upie, // @[TLB.scala:320:14] input io_ptw_status_mie, // @[TLB.scala:320:14] input io_ptw_status_hie, // @[TLB.scala:320:14] input io_ptw_status_sie, // @[TLB.scala:320:14] input io_ptw_status_uie, // @[TLB.scala:320:14] input io_ptw_hstatus_spvp, // @[TLB.scala:320:14] input io_ptw_hstatus_spv, // @[TLB.scala:320:14] input io_ptw_hstatus_gva, // @[TLB.scala:320:14] input io_ptw_gstatus_debug, // @[TLB.scala:320:14] input io_ptw_gstatus_cease, // @[TLB.scala:320:14] input io_ptw_gstatus_wfi, // @[TLB.scala:320:14] input [31:0] io_ptw_gstatus_isa, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_dprv, // @[TLB.scala:320:14] input io_ptw_gstatus_dv, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_prv, // @[TLB.scala:320:14] input io_ptw_gstatus_v, // @[TLB.scala:320:14] input [22:0] io_ptw_gstatus_zero2, // @[TLB.scala:320:14] input io_ptw_gstatus_mpv, // @[TLB.scala:320:14] input io_ptw_gstatus_gva, // @[TLB.scala:320:14] input io_ptw_gstatus_mbe, // @[TLB.scala:320:14] input io_ptw_gstatus_sbe, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_sxl, // @[TLB.scala:320:14] input [7:0] io_ptw_gstatus_zero1, // @[TLB.scala:320:14] input io_ptw_gstatus_tsr, // @[TLB.scala:320:14] input io_ptw_gstatus_tw, // @[TLB.scala:320:14] input io_ptw_gstatus_tvm, // @[TLB.scala:320:14] input io_ptw_gstatus_mxr, // @[TLB.scala:320:14] input io_ptw_gstatus_sum, // @[TLB.scala:320:14] input io_ptw_gstatus_mprv, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_fs, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_mpp, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_vs, // @[TLB.scala:320:14] input io_ptw_gstatus_spp, // @[TLB.scala:320:14] input io_ptw_gstatus_mpie, // @[TLB.scala:320:14] input io_ptw_gstatus_ube, // @[TLB.scala:320:14] input io_ptw_gstatus_spie, // @[TLB.scala:320:14] input io_ptw_gstatus_upie, // @[TLB.scala:320:14] input io_ptw_gstatus_mie, // @[TLB.scala:320:14] input io_ptw_gstatus_hie, // @[TLB.scala:320:14] input io_ptw_gstatus_sie, // @[TLB.scala:320:14] input io_ptw_gstatus_uie, // @[TLB.scala:320:14] input io_ptw_pmp_0_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_0_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_0_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_0_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_0_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_0_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_0_mask, // @[TLB.scala:320:14] input io_ptw_pmp_1_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_1_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_1_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_1_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_1_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_1_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_1_mask, // @[TLB.scala:320:14] input io_ptw_pmp_2_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_2_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_2_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_2_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_2_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_2_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_2_mask, // @[TLB.scala:320:14] input io_ptw_pmp_3_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_3_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_3_cfg_x, // @[TLB.scala:320:14] input 
io_ptw_pmp_3_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_3_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_3_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_3_mask, // @[TLB.scala:320:14] input io_ptw_pmp_4_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_4_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_4_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_4_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_4_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_4_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_4_mask, // @[TLB.scala:320:14] input io_ptw_pmp_5_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_5_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_5_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_5_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_5_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_5_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_5_mask, // @[TLB.scala:320:14] input io_ptw_pmp_6_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_6_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_6_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_6_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_6_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_6_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_6_mask, // @[TLB.scala:320:14] input io_ptw_pmp_7_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_7_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_7_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_7_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_7_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_7_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_7_mask, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_0_ren, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_0_wen, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_0_wdata, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_0_value, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_1_ren, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_1_wen, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_1_wdata, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_1_value, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_2_ren, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_2_wen, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_2_wdata, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_2_value, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_3_ren, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_3_wen, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_3_wdata, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_3_value // @[TLB.scala:320:14] ); wire [19:0] _entries_barrier_5_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_5_io_y_u; // @[package.scala:267:25] wire _entries_barrier_5_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_5_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_5_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_5_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_5_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_5_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_5_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_5_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_5_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_5_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_5_io_y_hr; // @[package.scala:267:25] wire [19:0] _entries_barrier_4_io_y_ppn; // @[package.scala:267:25] wire 
_entries_barrier_4_io_y_u; // @[package.scala:267:25] wire _entries_barrier_4_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_4_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_4_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_4_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_4_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_4_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_4_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_4_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_4_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_4_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_4_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_4_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_4_io_y_px; // @[package.scala:267:25] wire _entries_barrier_4_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_4_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_4_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_4_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_4_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_4_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_3_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_3_io_y_u; // @[package.scala:267:25] wire _entries_barrier_3_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_3_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_3_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_3_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_3_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_3_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_3_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_3_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_3_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_3_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_3_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_3_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_3_io_y_px; // @[package.scala:267:25] wire _entries_barrier_3_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_3_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_3_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_3_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_3_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_3_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_2_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_2_io_y_u; // @[package.scala:267:25] wire _entries_barrier_2_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_2_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_2_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_2_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_2_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_2_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_2_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_2_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_2_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_2_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_2_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_2_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_2_io_y_px; // @[package.scala:267:25] wire _entries_barrier_2_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_2_io_y_ppp; // @[package.scala:267:25] wire 
_entries_barrier_2_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_2_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_2_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_2_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_1_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_1_io_y_u; // @[package.scala:267:25] wire _entries_barrier_1_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_1_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_1_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_1_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_1_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_1_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_1_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_1_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_1_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_1_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_1_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_1_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_1_io_y_px; // @[package.scala:267:25] wire _entries_barrier_1_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_1_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_1_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_1_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_1_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_1_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_io_y_u; // @[package.scala:267:25] wire _entries_barrier_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_io_y_px; // @[package.scala:267:25] wire _entries_barrier_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_io_y_c; // @[package.scala:267:25] wire _pma_io_resp_r; // @[TLB.scala:422:19] wire _pma_io_resp_w; // @[TLB.scala:422:19] wire _pma_io_resp_pp; // @[TLB.scala:422:19] wire _pma_io_resp_al; // @[TLB.scala:422:19] wire _pma_io_resp_aa; // @[TLB.scala:422:19] wire _pma_io_resp_x; // @[TLB.scala:422:19] wire _pma_io_resp_eff; // @[TLB.scala:422:19] wire _pmp_io_r; // @[TLB.scala:416:19] wire _pmp_io_w; // @[TLB.scala:416:19] wire _pmp_io_x; // @[TLB.scala:416:19] wire [19:0] _mpu_ppn_barrier_io_y_ppn; // @[package.scala:267:25] wire io_req_valid_0 = io_req_valid; // @[TLB.scala:318:7] wire [39:0] io_req_bits_vaddr_0 = io_req_bits_vaddr; // @[TLB.scala:318:7] wire [1:0] io_req_bits_size_0 = io_req_bits_size; // @[TLB.scala:318:7] wire [4:0] io_req_bits_cmd_0 = io_req_bits_cmd; // @[TLB.scala:318:7] wire 
io_sfence_valid_0 = io_sfence_valid; // @[TLB.scala:318:7] wire io_ptw_req_ready_0 = io_ptw_req_ready; // @[TLB.scala:318:7] wire io_ptw_resp_valid_0 = io_ptw_resp_valid; // @[TLB.scala:318:7] wire io_ptw_resp_bits_ae_ptw_0 = io_ptw_resp_bits_ae_ptw; // @[TLB.scala:318:7] wire io_ptw_resp_bits_ae_final_0 = io_ptw_resp_bits_ae_final; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pf_0 = io_ptw_resp_bits_pf; // @[TLB.scala:318:7] wire io_ptw_resp_bits_gf_0 = io_ptw_resp_bits_gf; // @[TLB.scala:318:7] wire io_ptw_resp_bits_hr_0 = io_ptw_resp_bits_hr; // @[TLB.scala:318:7] wire io_ptw_resp_bits_hw_0 = io_ptw_resp_bits_hw; // @[TLB.scala:318:7] wire io_ptw_resp_bits_hx_0 = io_ptw_resp_bits_hx; // @[TLB.scala:318:7] wire [9:0] io_ptw_resp_bits_pte_reserved_for_future_0 = io_ptw_resp_bits_pte_reserved_for_future; // @[TLB.scala:318:7] wire [43:0] io_ptw_resp_bits_pte_ppn_0 = io_ptw_resp_bits_pte_ppn; // @[TLB.scala:318:7] wire [1:0] io_ptw_resp_bits_pte_reserved_for_software_0 = io_ptw_resp_bits_pte_reserved_for_software; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_d_0 = io_ptw_resp_bits_pte_d; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_a_0 = io_ptw_resp_bits_pte_a; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_g_0 = io_ptw_resp_bits_pte_g; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_u_0 = io_ptw_resp_bits_pte_u; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_x_0 = io_ptw_resp_bits_pte_x; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_w_0 = io_ptw_resp_bits_pte_w; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_r_0 = io_ptw_resp_bits_pte_r; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_v_0 = io_ptw_resp_bits_pte_v; // @[TLB.scala:318:7] wire [1:0] io_ptw_resp_bits_level_0 = io_ptw_resp_bits_level; // @[TLB.scala:318:7] wire io_ptw_resp_bits_homogeneous_0 = io_ptw_resp_bits_homogeneous; // @[TLB.scala:318:7] wire io_ptw_resp_bits_gpa_valid_0 = io_ptw_resp_bits_gpa_valid; // @[TLB.scala:318:7] wire [38:0] io_ptw_resp_bits_gpa_bits_0 = io_ptw_resp_bits_gpa_bits; // @[TLB.scala:318:7] wire io_ptw_resp_bits_gpa_is_pte_0 = io_ptw_resp_bits_gpa_is_pte; // @[TLB.scala:318:7] wire [3:0] io_ptw_ptbr_mode_0 = io_ptw_ptbr_mode; // @[TLB.scala:318:7] wire [43:0] io_ptw_ptbr_ppn_0 = io_ptw_ptbr_ppn; // @[TLB.scala:318:7] wire io_ptw_status_debug_0 = io_ptw_status_debug; // @[TLB.scala:318:7] wire io_ptw_status_cease_0 = io_ptw_status_cease; // @[TLB.scala:318:7] wire io_ptw_status_wfi_0 = io_ptw_status_wfi; // @[TLB.scala:318:7] wire [31:0] io_ptw_status_isa_0 = io_ptw_status_isa; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_dprv_0 = io_ptw_status_dprv; // @[TLB.scala:318:7] wire io_ptw_status_dv_0 = io_ptw_status_dv; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_prv_0 = io_ptw_status_prv; // @[TLB.scala:318:7] wire io_ptw_status_v_0 = io_ptw_status_v; // @[TLB.scala:318:7] wire io_ptw_status_sd_0 = io_ptw_status_sd; // @[TLB.scala:318:7] wire [22:0] io_ptw_status_zero2_0 = io_ptw_status_zero2; // @[TLB.scala:318:7] wire io_ptw_status_mpv_0 = io_ptw_status_mpv; // @[TLB.scala:318:7] wire io_ptw_status_gva_0 = io_ptw_status_gva; // @[TLB.scala:318:7] wire io_ptw_status_mbe_0 = io_ptw_status_mbe; // @[TLB.scala:318:7] wire io_ptw_status_sbe_0 = io_ptw_status_sbe; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_sxl_0 = io_ptw_status_sxl; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_uxl_0 = io_ptw_status_uxl; // @[TLB.scala:318:7] wire io_ptw_status_sd_rv32_0 = io_ptw_status_sd_rv32; // @[TLB.scala:318:7] wire [7:0] io_ptw_status_zero1_0 = io_ptw_status_zero1; // @[TLB.scala:318:7] wire 
io_ptw_status_tsr_0 = io_ptw_status_tsr; // @[TLB.scala:318:7] wire io_ptw_status_tw_0 = io_ptw_status_tw; // @[TLB.scala:318:7] wire io_ptw_status_tvm_0 = io_ptw_status_tvm; // @[TLB.scala:318:7] wire io_ptw_status_mxr_0 = io_ptw_status_mxr; // @[TLB.scala:318:7] wire io_ptw_status_sum_0 = io_ptw_status_sum; // @[TLB.scala:318:7] wire io_ptw_status_mprv_0 = io_ptw_status_mprv; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_xs_0 = io_ptw_status_xs; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_fs_0 = io_ptw_status_fs; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_mpp_0 = io_ptw_status_mpp; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_vs_0 = io_ptw_status_vs; // @[TLB.scala:318:7] wire io_ptw_status_spp_0 = io_ptw_status_spp; // @[TLB.scala:318:7] wire io_ptw_status_mpie_0 = io_ptw_status_mpie; // @[TLB.scala:318:7] wire io_ptw_status_ube_0 = io_ptw_status_ube; // @[TLB.scala:318:7] wire io_ptw_status_spie_0 = io_ptw_status_spie; // @[TLB.scala:318:7] wire io_ptw_status_upie_0 = io_ptw_status_upie; // @[TLB.scala:318:7] wire io_ptw_status_mie_0 = io_ptw_status_mie; // @[TLB.scala:318:7] wire io_ptw_status_hie_0 = io_ptw_status_hie; // @[TLB.scala:318:7] wire io_ptw_status_sie_0 = io_ptw_status_sie; // @[TLB.scala:318:7] wire io_ptw_status_uie_0 = io_ptw_status_uie; // @[TLB.scala:318:7] wire io_ptw_hstatus_spvp_0 = io_ptw_hstatus_spvp; // @[TLB.scala:318:7] wire io_ptw_hstatus_spv_0 = io_ptw_hstatus_spv; // @[TLB.scala:318:7] wire io_ptw_hstatus_gva_0 = io_ptw_hstatus_gva; // @[TLB.scala:318:7] wire io_ptw_gstatus_debug_0 = io_ptw_gstatus_debug; // @[TLB.scala:318:7] wire io_ptw_gstatus_cease_0 = io_ptw_gstatus_cease; // @[TLB.scala:318:7] wire io_ptw_gstatus_wfi_0 = io_ptw_gstatus_wfi; // @[TLB.scala:318:7] wire [31:0] io_ptw_gstatus_isa_0 = io_ptw_gstatus_isa; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_dprv_0 = io_ptw_gstatus_dprv; // @[TLB.scala:318:7] wire io_ptw_gstatus_dv_0 = io_ptw_gstatus_dv; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_prv_0 = io_ptw_gstatus_prv; // @[TLB.scala:318:7] wire io_ptw_gstatus_v_0 = io_ptw_gstatus_v; // @[TLB.scala:318:7] wire [22:0] io_ptw_gstatus_zero2_0 = io_ptw_gstatus_zero2; // @[TLB.scala:318:7] wire io_ptw_gstatus_mpv_0 = io_ptw_gstatus_mpv; // @[TLB.scala:318:7] wire io_ptw_gstatus_gva_0 = io_ptw_gstatus_gva; // @[TLB.scala:318:7] wire io_ptw_gstatus_mbe_0 = io_ptw_gstatus_mbe; // @[TLB.scala:318:7] wire io_ptw_gstatus_sbe_0 = io_ptw_gstatus_sbe; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_sxl_0 = io_ptw_gstatus_sxl; // @[TLB.scala:318:7] wire [7:0] io_ptw_gstatus_zero1_0 = io_ptw_gstatus_zero1; // @[TLB.scala:318:7] wire io_ptw_gstatus_tsr_0 = io_ptw_gstatus_tsr; // @[TLB.scala:318:7] wire io_ptw_gstatus_tw_0 = io_ptw_gstatus_tw; // @[TLB.scala:318:7] wire io_ptw_gstatus_tvm_0 = io_ptw_gstatus_tvm; // @[TLB.scala:318:7] wire io_ptw_gstatus_mxr_0 = io_ptw_gstatus_mxr; // @[TLB.scala:318:7] wire io_ptw_gstatus_sum_0 = io_ptw_gstatus_sum; // @[TLB.scala:318:7] wire io_ptw_gstatus_mprv_0 = io_ptw_gstatus_mprv; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_fs_0 = io_ptw_gstatus_fs; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_mpp_0 = io_ptw_gstatus_mpp; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_vs_0 = io_ptw_gstatus_vs; // @[TLB.scala:318:7] wire io_ptw_gstatus_spp_0 = io_ptw_gstatus_spp; // @[TLB.scala:318:7] wire io_ptw_gstatus_mpie_0 = io_ptw_gstatus_mpie; // @[TLB.scala:318:7] wire io_ptw_gstatus_ube_0 = io_ptw_gstatus_ube; // @[TLB.scala:318:7] wire io_ptw_gstatus_spie_0 = io_ptw_gstatus_spie; // @[TLB.scala:318:7] 
wire io_ptw_gstatus_upie_0 = io_ptw_gstatus_upie; // @[TLB.scala:318:7] wire io_ptw_gstatus_mie_0 = io_ptw_gstatus_mie; // @[TLB.scala:318:7] wire io_ptw_gstatus_hie_0 = io_ptw_gstatus_hie; // @[TLB.scala:318:7] wire io_ptw_gstatus_sie_0 = io_ptw_gstatus_sie; // @[TLB.scala:318:7] wire io_ptw_gstatus_uie_0 = io_ptw_gstatus_uie; // @[TLB.scala:318:7] wire io_ptw_pmp_0_cfg_l_0 = io_ptw_pmp_0_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_0_cfg_a_0 = io_ptw_pmp_0_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_0_cfg_x_0 = io_ptw_pmp_0_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_0_cfg_w_0 = io_ptw_pmp_0_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_0_cfg_r_0 = io_ptw_pmp_0_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_0_addr_0 = io_ptw_pmp_0_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_0_mask_0 = io_ptw_pmp_0_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_1_cfg_l_0 = io_ptw_pmp_1_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_1_cfg_a_0 = io_ptw_pmp_1_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_1_cfg_x_0 = io_ptw_pmp_1_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_1_cfg_w_0 = io_ptw_pmp_1_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_1_cfg_r_0 = io_ptw_pmp_1_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_1_addr_0 = io_ptw_pmp_1_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_1_mask_0 = io_ptw_pmp_1_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_2_cfg_l_0 = io_ptw_pmp_2_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_2_cfg_a_0 = io_ptw_pmp_2_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_2_cfg_x_0 = io_ptw_pmp_2_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_2_cfg_w_0 = io_ptw_pmp_2_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_2_cfg_r_0 = io_ptw_pmp_2_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_2_addr_0 = io_ptw_pmp_2_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_2_mask_0 = io_ptw_pmp_2_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_3_cfg_l_0 = io_ptw_pmp_3_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_3_cfg_a_0 = io_ptw_pmp_3_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_3_cfg_x_0 = io_ptw_pmp_3_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_3_cfg_w_0 = io_ptw_pmp_3_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_3_cfg_r_0 = io_ptw_pmp_3_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_3_addr_0 = io_ptw_pmp_3_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_3_mask_0 = io_ptw_pmp_3_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_4_cfg_l_0 = io_ptw_pmp_4_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_4_cfg_a_0 = io_ptw_pmp_4_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_4_cfg_x_0 = io_ptw_pmp_4_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_4_cfg_w_0 = io_ptw_pmp_4_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_4_cfg_r_0 = io_ptw_pmp_4_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_4_addr_0 = io_ptw_pmp_4_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_4_mask_0 = io_ptw_pmp_4_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_5_cfg_l_0 = io_ptw_pmp_5_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_5_cfg_a_0 = io_ptw_pmp_5_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_5_cfg_x_0 = io_ptw_pmp_5_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_5_cfg_w_0 = io_ptw_pmp_5_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_5_cfg_r_0 = io_ptw_pmp_5_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_5_addr_0 = io_ptw_pmp_5_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_5_mask_0 = io_ptw_pmp_5_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_6_cfg_l_0 = io_ptw_pmp_6_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_6_cfg_a_0 = io_ptw_pmp_6_cfg_a; // @[TLB.scala:318:7] wire 
io_ptw_pmp_6_cfg_x_0 = io_ptw_pmp_6_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_6_cfg_w_0 = io_ptw_pmp_6_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_6_cfg_r_0 = io_ptw_pmp_6_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_6_addr_0 = io_ptw_pmp_6_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_6_mask_0 = io_ptw_pmp_6_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_7_cfg_l_0 = io_ptw_pmp_7_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_7_cfg_a_0 = io_ptw_pmp_7_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_7_cfg_x_0 = io_ptw_pmp_7_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_7_cfg_w_0 = io_ptw_pmp_7_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_7_cfg_r_0 = io_ptw_pmp_7_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_7_addr_0 = io_ptw_pmp_7_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_7_mask_0 = io_ptw_pmp_7_mask; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_0_ren_0 = io_ptw_customCSRs_csrs_0_ren; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_0_wen_0 = io_ptw_customCSRs_csrs_0_wen; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_0_wdata_0 = io_ptw_customCSRs_csrs_0_wdata; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_0_value_0 = io_ptw_customCSRs_csrs_0_value; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_1_ren_0 = io_ptw_customCSRs_csrs_1_ren; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_1_wen_0 = io_ptw_customCSRs_csrs_1_wen; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_1_wdata_0 = io_ptw_customCSRs_csrs_1_wdata; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_1_value_0 = io_ptw_customCSRs_csrs_1_value; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_2_ren_0 = io_ptw_customCSRs_csrs_2_ren; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_2_wen_0 = io_ptw_customCSRs_csrs_2_wen; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_2_wdata_0 = io_ptw_customCSRs_csrs_2_wdata; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_2_value_0 = io_ptw_customCSRs_csrs_2_value; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_3_ren_0 = io_ptw_customCSRs_csrs_3_ren; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_3_wen_0 = io_ptw_customCSRs_csrs_3_wen; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_3_wdata_0 = io_ptw_customCSRs_csrs_3_wdata; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_3_value_0 = io_ptw_customCSRs_csrs_3_value; // @[TLB.scala:318:7] wire [6:0] hr_array = 7'h7F; // @[TLB.scala:524:21] wire [6:0] hw_array = 7'h7F; // @[TLB.scala:525:21] wire [6:0] hx_array = 7'h7F; // @[TLB.scala:526:21] wire [6:0] _must_alloc_array_T_8 = 7'h7F; // @[TLB.scala:596:19] wire [6:0] _gf_ld_array_T_1 = 7'h7F; // @[TLB.scala:600:50] wire [5:0] stage2_bypass = 6'h3F; // @[TLB.scala:523:27] wire [5:0] _hr_array_T_4 = 6'h3F; // @[TLB.scala:524:111] wire [5:0] _hw_array_T_1 = 6'h3F; // @[TLB.scala:525:55] wire [5:0] _hx_array_T_1 = 6'h3F; // @[TLB.scala:526:55] wire [5:0] _gpa_hits_hit_mask_T_4 = 6'h3F; // @[TLB.scala:606:88] wire [5:0] gpa_hits_hit_mask = 6'h3F; // @[TLB.scala:606:82] wire [5:0] _gpa_hits_T_1 = 6'h3F; // @[TLB.scala:607:16] wire [5:0] gpa_hits = 6'h3F; // @[TLB.scala:607:14] wire [2:0] _state_vec_WIRE_0 = 3'h0; // @[Replacement.scala:305:25] wire [2:0] _state_vec_WIRE_1 = 3'h0; // @[Replacement.scala:305:25] wire [2:0] _state_vec_WIRE_2 = 3'h0; // @[Replacement.scala:305:25] wire [2:0] _state_vec_WIRE_3 = 3'h0; // @[Replacement.scala:305:25] wire [6:0] _gf_ld_array_T_2 = 7'h0; // @[TLB.scala:600:46] wire [6:0] gf_ld_array = 7'h0; // @[TLB.scala:600:24] wire [6:0] _gf_st_array_T_1 = 
7'h0; // @[TLB.scala:601:53] wire [6:0] gf_st_array = 7'h0; // @[TLB.scala:601:24] wire [6:0] _gf_inst_array_T = 7'h0; // @[TLB.scala:602:36] wire [6:0] gf_inst_array = 7'h0; // @[TLB.scala:602:26] wire [6:0] gpa_hits_need_gpa_mask = 7'h0; // @[TLB.scala:605:73] wire [6:0] _io_resp_gf_ld_T_1 = 7'h0; // @[TLB.scala:637:58] wire [6:0] _io_resp_gf_st_T_1 = 7'h0; // @[TLB.scala:638:65] wire [6:0] _io_resp_gf_inst_T = 7'h0; // @[TLB.scala:639:48] wire [63:0] io_ptw_customCSRs_csrs_0_sdata = 64'h0; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_1_sdata = 64'h0; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_2_sdata = 64'h0; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_3_sdata = 64'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_hstatus_vsxl = 2'h2; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_uxl = 2'h2; // @[TLB.scala:318:7] wire [38:0] io_sfence_bits_addr = 39'h0; // @[TLB.scala:318:7, :320:14] wire [1:0] io_ptw_gstatus_xs = 2'h3; // @[TLB.scala:318:7] wire io_ptw_req_bits_valid = 1'h1; // @[TLB.scala:318:7] wire io_ptw_gstatus_sd = 1'h1; // @[TLB.scala:318:7] wire priv_uses_vm = 1'h1; // @[TLB.scala:372:27] wire _vm_enabled_T_2 = 1'h1; // @[TLB.scala:399:64] wire _vsatp_mode_mismatch_T_2 = 1'h1; // @[TLB.scala:403:81] wire _homogeneous_T_59 = 1'h1; // @[TLBPermissions.scala:87:22] wire superpage_hits_ignore_2 = 1'h1; // @[TLB.scala:182:34] wire _superpage_hits_T_13 = 1'h1; // @[TLB.scala:183:40] wire hitsVec_ignore_2 = 1'h1; // @[TLB.scala:182:34] wire _hitsVec_T_37 = 1'h1; // @[TLB.scala:183:40] wire ppn_ignore_1 = 1'h1; // @[TLB.scala:197:34] wire _priv_rw_ok_T = 1'h1; // @[TLB.scala:513:24] wire _priv_rw_ok_T_1 = 1'h1; // @[TLB.scala:513:32] wire _stage2_bypass_T = 1'h1; // @[TLB.scala:523:42] wire _bad_va_T_1 = 1'h1; // @[TLB.scala:560:26] wire _gpa_hits_hit_mask_T_3 = 1'h1; // @[TLB.scala:606:107] wire _tlb_miss_T = 1'h1; // @[TLB.scala:613:32] wire _io_resp_gpa_page_T = 1'h1; // @[TLB.scala:657:20] wire _io_ptw_req_bits_valid_T = 1'h1; // @[TLB.scala:663:28] wire ignore_2 = 1'h1; // @[TLB.scala:182:34] wire [4:0] io_ptw_hstatus_zero1 = 5'h0; // @[TLB.scala:318:7] wire [5:0] io_ptw_hstatus_vgein = 6'h0; // @[TLB.scala:318:7] wire [5:0] _priv_rw_ok_T_6 = 6'h0; // @[TLB.scala:513:75] wire [5:0] _stage1_bypass_T = 6'h0; // @[TLB.scala:517:27] wire [5:0] stage1_bypass = 6'h0; // @[TLB.scala:517:61] wire [5:0] _gpa_hits_T = 6'h0; // @[TLB.scala:607:30] wire [1:0] io_req_bits_prv = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_hstatus_zero3 = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_hstatus_zero2 = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_0_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_1_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_2_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_3_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_4_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_5_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_6_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_7_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [8:0] io_ptw_hstatus_zero5 = 9'h0; // @[TLB.scala:318:7, :320:14] wire [29:0] io_ptw_hstatus_zero6 = 30'h0; // @[TLB.scala:318:7, :320:14] wire [43:0] io_ptw_hgatp_ppn = 44'h0; // @[TLB.scala:318:7, :320:14] wire [43:0] io_ptw_vsatp_ppn = 44'h0; // @[TLB.scala:318:7, :320:14] wire [3:0] io_ptw_hgatp_mode = 4'h0; // @[TLB.scala:318:7, :320:14] wire [3:0] io_ptw_vsatp_mode = 4'h0; // @[TLB.scala:318:7, :320:14] wire [15:0] io_ptw_ptbr_asid = 
16'h0; // @[TLB.scala:318:7, :320:14, :373:17] wire [15:0] io_ptw_hgatp_asid = 16'h0; // @[TLB.scala:318:7, :320:14, :373:17] wire [15:0] io_ptw_vsatp_asid = 16'h0; // @[TLB.scala:318:7, :320:14, :373:17] wire [15:0] satp_asid = 16'h0; // @[TLB.scala:318:7, :320:14, :373:17] wire io_req_bits_passthrough = 1'h0; // @[TLB.scala:318:7] wire io_req_bits_v = 1'h0; // @[TLB.scala:318:7] wire io_resp_gpa_is_pte = 1'h0; // @[TLB.scala:318:7] wire io_resp_gf_ld = 1'h0; // @[TLB.scala:318:7] wire io_resp_gf_st = 1'h0; // @[TLB.scala:318:7] wire io_resp_gf_inst = 1'h0; // @[TLB.scala:318:7] wire io_resp_ma_inst = 1'h0; // @[TLB.scala:318:7] wire io_sfence_bits_rs1 = 1'h0; // @[TLB.scala:318:7] wire io_sfence_bits_rs2 = 1'h0; // @[TLB.scala:318:7] wire io_sfence_bits_asid = 1'h0; // @[TLB.scala:318:7] wire io_sfence_bits_hv = 1'h0; // @[TLB.scala:318:7] wire io_sfence_bits_hg = 1'h0; // @[TLB.scala:318:7] wire io_ptw_req_bits_bits_vstage1 = 1'h0; // @[TLB.scala:318:7] wire io_ptw_req_bits_bits_stage2 = 1'h0; // @[TLB.scala:318:7] wire io_ptw_resp_bits_fragmented_superpage = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_vtsr = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_vtw = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_vtvm = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_hu = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_vsbe = 1'h0; // @[TLB.scala:318:7] wire io_ptw_gstatus_sd_rv32 = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_0_stall = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_0_set = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_1_stall = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_1_set = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_2_stall = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_2_set = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_3_stall = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_3_set = 1'h0; // @[TLB.scala:318:7] wire io_kill = 1'h0; // @[TLB.scala:318:7] wire priv_v = 1'h0; // @[TLB.scala:369:34] wire priv_s = 1'h0; // @[TLB.scala:370:20] wire _vstage1_en_T = 1'h0; // @[TLB.scala:376:38] wire _vstage1_en_T_1 = 1'h0; // @[TLB.scala:376:68] wire vstage1_en = 1'h0; // @[TLB.scala:376:48] wire _stage2_en_T = 1'h0; // @[TLB.scala:378:38] wire _stage2_en_T_1 = 1'h0; // @[TLB.scala:378:68] wire stage2_en = 1'h0; // @[TLB.scala:378:48] wire _vsatp_mode_mismatch_T = 1'h0; // @[TLB.scala:403:52] wire _vsatp_mode_mismatch_T_1 = 1'h0; // @[TLB.scala:403:37] wire vsatp_mode_mismatch = 1'h0; // @[TLB.scala:403:78] wire _superpage_hits_ignore_T = 1'h0; // @[TLB.scala:182:28] wire superpage_hits_ignore = 1'h0; // @[TLB.scala:182:34] wire _hitsVec_ignore_T = 1'h0; // @[TLB.scala:182:28] wire hitsVec_ignore = 1'h0; // @[TLB.scala:182:34] wire _hitsVec_ignore_T_3 = 1'h0; // @[TLB.scala:182:28] wire hitsVec_ignore_3 = 1'h0; // @[TLB.scala:182:34] wire refill_v = 1'h0; // @[TLB.scala:448:33] wire newEntry_ae_stage2 = 1'h0; // @[TLB.scala:449:24] wire newEntry_fragmented_superpage = 1'h0; // @[TLB.scala:449:24] wire _newEntry_ae_stage2_T_1 = 1'h0; // @[TLB.scala:456:84] wire _waddr_T = 1'h0; // @[TLB.scala:477:45] wire _mxr_T = 1'h0; // @[TLB.scala:518:36] wire cmd_readx = 1'h0; // @[TLB.scala:575:37] wire _gf_ld_array_T = 1'h0; // @[TLB.scala:600:32] wire _gf_st_array_T = 1'h0; // @[TLB.scala:601:32] wire _multipleHits_T_5 = 1'h0; // @[Misc.scala:183:37] wire _multipleHits_T_14 = 1'h0; // @[Misc.scala:183:37] wire _io_req_ready_T; // @[TLB.scala:631:25] wire _io_resp_gf_ld_T = 1'h0; // 
@[TLB.scala:637:29] wire _io_resp_gf_ld_T_2 = 1'h0; // @[TLB.scala:637:66] wire _io_resp_gf_ld_T_3 = 1'h0; // @[TLB.scala:637:42] wire _io_resp_gf_st_T = 1'h0; // @[TLB.scala:638:29] wire _io_resp_gf_st_T_2 = 1'h0; // @[TLB.scala:638:73] wire _io_resp_gf_st_T_3 = 1'h0; // @[TLB.scala:638:49] wire _io_resp_gf_inst_T_1 = 1'h0; // @[TLB.scala:639:56] wire _io_resp_gf_inst_T_2 = 1'h0; // @[TLB.scala:639:30] wire _io_resp_gpa_is_pte_T = 1'h0; // @[TLB.scala:655:36] wire _r_superpage_repl_addr_T_3 = 1'h0; // @[TLB.scala:757:8] wire hv = 1'h0; // @[TLB.scala:721:36] wire hg = 1'h0; // @[TLB.scala:722:36] wire hv_1 = 1'h0; // @[TLB.scala:721:36] wire hg_1 = 1'h0; // @[TLB.scala:722:36] wire hv_2 = 1'h0; // @[TLB.scala:721:36] wire hg_2 = 1'h0; // @[TLB.scala:722:36] wire hv_3 = 1'h0; // @[TLB.scala:721:36] wire hg_3 = 1'h0; // @[TLB.scala:722:36] wire hv_4 = 1'h0; // @[TLB.scala:721:36] wire hg_4 = 1'h0; // @[TLB.scala:722:36] wire hv_5 = 1'h0; // @[TLB.scala:721:36] wire hg_5 = 1'h0; // @[TLB.scala:722:36] wire hv_6 = 1'h0; // @[TLB.scala:721:36] wire hg_6 = 1'h0; // @[TLB.scala:722:36] wire hv_7 = 1'h0; // @[TLB.scala:721:36] wire hg_7 = 1'h0; // @[TLB.scala:722:36] wire hv_8 = 1'h0; // @[TLB.scala:721:36] wire hg_8 = 1'h0; // @[TLB.scala:722:36] wire hv_9 = 1'h0; // @[TLB.scala:721:36] wire hg_9 = 1'h0; // @[TLB.scala:722:36] wire hv_10 = 1'h0; // @[TLB.scala:721:36] wire hg_10 = 1'h0; // @[TLB.scala:722:36] wire hv_11 = 1'h0; // @[TLB.scala:721:36] wire hg_11 = 1'h0; // @[TLB.scala:722:36] wire hv_12 = 1'h0; // @[TLB.scala:721:36] wire hg_12 = 1'h0; // @[TLB.scala:722:36] wire hv_13 = 1'h0; // @[TLB.scala:721:36] wire hg_13 = 1'h0; // @[TLB.scala:722:36] wire hv_14 = 1'h0; // @[TLB.scala:721:36] wire hg_14 = 1'h0; // @[TLB.scala:722:36] wire hv_15 = 1'h0; // @[TLB.scala:721:36] wire hg_15 = 1'h0; // @[TLB.scala:722:36] wire hv_16 = 1'h0; // @[TLB.scala:721:36] wire hg_16 = 1'h0; // @[TLB.scala:722:36] wire _ignore_T = 1'h0; // @[TLB.scala:182:28] wire ignore = 1'h0; // @[TLB.scala:182:34] wire hv_17 = 1'h0; // @[TLB.scala:721:36] wire hg_17 = 1'h0; // @[TLB.scala:722:36] wire _ignore_T_3 = 1'h0; // @[TLB.scala:182:28] wire ignore_3 = 1'h0; // @[TLB.scala:182:34] wire [1:0] io_resp_size = io_req_bits_size_0; // @[TLB.scala:318:7] wire [4:0] io_resp_cmd = io_req_bits_cmd_0; // @[TLB.scala:318:7] wire _io_resp_miss_T_2; // @[TLB.scala:651:64] wire [31:0] _io_resp_paddr_T_1; // @[TLB.scala:652:23] wire [39:0] _io_resp_gpa_T; // @[TLB.scala:659:8] wire _io_resp_pf_ld_T_3; // @[TLB.scala:633:41] wire _io_resp_pf_st_T_3; // @[TLB.scala:634:48] wire _io_resp_pf_inst_T_2; // @[TLB.scala:635:29] wire _io_resp_ae_ld_T_1; // @[TLB.scala:641:41] wire _io_resp_ae_st_T_1; // @[TLB.scala:642:41] wire _io_resp_ae_inst_T_2; // @[TLB.scala:643:41] wire _io_resp_ma_ld_T; // @[TLB.scala:645:31] wire _io_resp_ma_st_T; // @[TLB.scala:646:31] wire _io_resp_cacheable_T_1; // @[TLB.scala:648:41] wire _io_resp_must_alloc_T_1; // @[TLB.scala:649:51] wire _io_resp_prefetchable_T_2; // @[TLB.scala:650:59] wire _io_ptw_req_valid_T; // @[TLB.scala:662:29] wire do_refill = io_ptw_resp_valid_0; // @[TLB.scala:318:7, :408:29] wire newEntry_ae_ptw = io_ptw_resp_bits_ae_ptw_0; // @[TLB.scala:318:7, :449:24] wire newEntry_ae_final = io_ptw_resp_bits_ae_final_0; // @[TLB.scala:318:7, :449:24] wire newEntry_pf = io_ptw_resp_bits_pf_0; // @[TLB.scala:318:7, :449:24] wire newEntry_gf = io_ptw_resp_bits_gf_0; // @[TLB.scala:318:7, :449:24] wire newEntry_hr = io_ptw_resp_bits_hr_0; // @[TLB.scala:318:7, :449:24] wire newEntry_hw = 
io_ptw_resp_bits_hw_0; // @[TLB.scala:318:7, :449:24] wire newEntry_hx = io_ptw_resp_bits_hx_0; // @[TLB.scala:318:7, :449:24] wire newEntry_u = io_ptw_resp_bits_pte_u_0; // @[TLB.scala:318:7, :449:24] wire [1:0] _special_entry_level_T = io_ptw_resp_bits_level_0; // @[package.scala:163:13] wire [3:0] satp_mode = io_ptw_ptbr_mode_0; // @[TLB.scala:318:7, :373:17] wire [43:0] satp_ppn = io_ptw_ptbr_ppn_0; // @[TLB.scala:318:7, :373:17] wire mxr = io_ptw_status_mxr_0; // @[TLB.scala:318:7, :518:31] wire sum = io_ptw_status_sum_0; // @[TLB.scala:318:7, :510:16] wire io_req_ready_0; // @[TLB.scala:318:7] wire io_resp_pf_ld; // @[TLB.scala:318:7] wire io_resp_pf_st; // @[TLB.scala:318:7] wire io_resp_pf_inst; // @[TLB.scala:318:7] wire io_resp_ae_ld; // @[TLB.scala:318:7] wire io_resp_ae_st; // @[TLB.scala:318:7] wire io_resp_ae_inst; // @[TLB.scala:318:7] wire io_resp_ma_ld; // @[TLB.scala:318:7] wire io_resp_ma_st; // @[TLB.scala:318:7] wire io_resp_miss_0; // @[TLB.scala:318:7] wire [31:0] io_resp_paddr_0; // @[TLB.scala:318:7] wire [39:0] io_resp_gpa; // @[TLB.scala:318:7] wire io_resp_cacheable; // @[TLB.scala:318:7] wire io_resp_must_alloc; // @[TLB.scala:318:7] wire io_resp_prefetchable; // @[TLB.scala:318:7] wire [26:0] io_ptw_req_bits_bits_addr_0; // @[TLB.scala:318:7] wire io_ptw_req_bits_bits_need_gpa_0; // @[TLB.scala:318:7] wire io_ptw_req_valid_0; // @[TLB.scala:318:7] wire [26:0] vpn = io_req_bits_vaddr_0[38:12]; // @[TLB.scala:318:7, :335:30] wire [26:0] _ppn_T_5 = vpn; // @[TLB.scala:198:28, :335:30] wire [1:0] memIdx = vpn[1:0]; // @[package.scala:163:13] reg [1:0] sectored_entries_0_0_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_0_0_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_0_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_0_data_0; // @[TLB.scala:339:29] reg sectored_entries_0_0_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_1_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_0_1_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_1_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_1_data_0; // @[TLB.scala:339:29] reg sectored_entries_0_1_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_2_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_0_2_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_2_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_2_data_0; // @[TLB.scala:339:29] reg sectored_entries_0_2_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_3_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_0_3_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_3_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_3_data_0; // @[TLB.scala:339:29] reg sectored_entries_0_3_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_1_0_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_1_0_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_1_0_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_1_0_data_0; // @[TLB.scala:339:29] reg sectored_entries_1_0_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_1_1_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_1_1_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_1_1_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_1_1_data_0; // @[TLB.scala:339:29] reg sectored_entries_1_1_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_1_2_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_1_2_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_1_2_tag_v; // 
@[TLB.scala:339:29] reg [41:0] sectored_entries_1_2_data_0; // @[TLB.scala:339:29] reg sectored_entries_1_2_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_1_3_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_1_3_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_1_3_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_1_3_data_0; // @[TLB.scala:339:29] reg sectored_entries_1_3_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_2_0_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_2_0_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_2_0_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_2_0_data_0; // @[TLB.scala:339:29] reg sectored_entries_2_0_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_2_1_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_2_1_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_2_1_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_2_1_data_0; // @[TLB.scala:339:29] reg sectored_entries_2_1_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_2_2_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_2_2_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_2_2_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_2_2_data_0; // @[TLB.scala:339:29] reg sectored_entries_2_2_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_2_3_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_2_3_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_2_3_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_2_3_data_0; // @[TLB.scala:339:29] reg sectored_entries_2_3_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_3_0_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_3_0_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_3_0_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_3_0_data_0; // @[TLB.scala:339:29] reg sectored_entries_3_0_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_3_1_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_3_1_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_3_1_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_3_1_data_0; // @[TLB.scala:339:29] reg sectored_entries_3_1_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_3_2_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_3_2_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_3_2_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_3_2_data_0; // @[TLB.scala:339:29] reg sectored_entries_3_2_valid_0; // @[TLB.scala:339:29] reg [1:0] sectored_entries_3_3_level; // @[TLB.scala:339:29] reg [26:0] sectored_entries_3_3_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_3_3_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_3_3_data_0; // @[TLB.scala:339:29] reg sectored_entries_3_3_valid_0; // @[TLB.scala:339:29] reg [1:0] superpage_entries_0_level; // @[TLB.scala:341:30] reg [26:0] superpage_entries_0_tag_vpn; // @[TLB.scala:341:30] reg superpage_entries_0_tag_v; // @[TLB.scala:341:30] reg [41:0] superpage_entries_0_data_0; // @[TLB.scala:341:30] wire [41:0] _entries_WIRE_9 = superpage_entries_0_data_0; // @[TLB.scala:170:77, :341:30] reg superpage_entries_0_valid_0; // @[TLB.scala:341:30] wire _r_superpage_repl_addr_T = superpage_entries_0_valid_0; // @[TLB.scala:341:30, :757:16] reg [1:0] special_entry_level; // @[TLB.scala:346:56] reg [26:0] special_entry_tag_vpn; // @[TLB.scala:346:56] reg special_entry_tag_v; // @[TLB.scala:346:56] reg [41:0] special_entry_data_0; // @[TLB.scala:346:56] wire [41:0] 
_mpu_ppn_WIRE_1 = special_entry_data_0; // @[TLB.scala:170:77, :346:56] wire [41:0] _entries_WIRE_11 = special_entry_data_0; // @[TLB.scala:170:77, :346:56] reg special_entry_valid_0; // @[TLB.scala:346:56] reg [1:0] state; // @[TLB.scala:352:22] reg [26:0] r_refill_tag; // @[TLB.scala:354:25] assign io_ptw_req_bits_bits_addr_0 = r_refill_tag; // @[TLB.scala:318:7, :354:25] reg [1:0] r_sectored_repl_addr; // @[TLB.scala:356:33] reg r_sectored_hit_valid; // @[TLB.scala:357:27] reg [1:0] r_sectored_hit_bits; // @[TLB.scala:357:27] reg r_superpage_hit_valid; // @[TLB.scala:358:28] reg r_need_gpa; // @[TLB.scala:361:23] assign io_ptw_req_bits_bits_need_gpa_0 = r_need_gpa; // @[TLB.scala:318:7, :361:23] reg r_gpa_valid; // @[TLB.scala:362:24] reg [38:0] r_gpa; // @[TLB.scala:363:18] reg [26:0] r_gpa_vpn; // @[TLB.scala:364:22] reg r_gpa_is_pte; // @[TLB.scala:365:25] wire _stage1_en_T = satp_mode[3]; // @[TLB.scala:373:17, :374:41] wire stage1_en = _stage1_en_T; // @[TLB.scala:374:{29,41}] wire _vm_enabled_T = stage1_en; // @[TLB.scala:374:29, :399:31] wire _vm_enabled_T_1 = _vm_enabled_T; // @[TLB.scala:399:{31,45}] wire vm_enabled = _vm_enabled_T_1; // @[TLB.scala:399:{45,61}] wire _mpu_ppn_T = vm_enabled; // @[TLB.scala:399:61, :413:32] wire _tlb_miss_T_1 = vm_enabled; // @[TLB.scala:399:61, :613:29] wire [19:0] refill_ppn = io_ptw_resp_bits_pte_ppn_0[19:0]; // @[TLB.scala:318:7, :406:44] wire [19:0] newEntry_ppn = io_ptw_resp_bits_pte_ppn_0[19:0]; // @[TLB.scala:318:7, :406:44, :449:24] wire _mpu_priv_T = do_refill; // @[TLB.scala:408:29, :415:52] wire _io_resp_miss_T = do_refill; // @[TLB.scala:408:29, :651:29] wire _T_25 = state == 2'h1; // @[package.scala:16:47] wire _invalidate_refill_T; // @[package.scala:16:47] assign _invalidate_refill_T = _T_25; // @[package.scala:16:47] assign _io_ptw_req_valid_T = _T_25; // @[package.scala:16:47] wire _invalidate_refill_T_1 = &state; // @[package.scala:16:47] wire _invalidate_refill_T_2 = _invalidate_refill_T | _invalidate_refill_T_1; // @[package.scala:16:47, :81:59] wire invalidate_refill = _invalidate_refill_T_2 | io_sfence_valid_0; // @[package.scala:81:59] wire [19:0] _mpu_ppn_T_23; // @[TLB.scala:170:77] wire _mpu_ppn_T_22; // @[TLB.scala:170:77] wire _mpu_ppn_T_21; // @[TLB.scala:170:77] wire _mpu_ppn_T_20; // @[TLB.scala:170:77] wire _mpu_ppn_T_19; // @[TLB.scala:170:77] wire _mpu_ppn_T_18; // @[TLB.scala:170:77] wire _mpu_ppn_T_17; // @[TLB.scala:170:77] wire _mpu_ppn_T_16; // @[TLB.scala:170:77] wire _mpu_ppn_T_15; // @[TLB.scala:170:77] wire _mpu_ppn_T_14; // @[TLB.scala:170:77] wire _mpu_ppn_T_13; // @[TLB.scala:170:77] wire _mpu_ppn_T_12; // @[TLB.scala:170:77] wire _mpu_ppn_T_11; // @[TLB.scala:170:77] wire _mpu_ppn_T_10; // @[TLB.scala:170:77] wire _mpu_ppn_T_9; // @[TLB.scala:170:77] wire _mpu_ppn_T_8; // @[TLB.scala:170:77] wire _mpu_ppn_T_7; // @[TLB.scala:170:77] wire _mpu_ppn_T_6; // @[TLB.scala:170:77] wire _mpu_ppn_T_5; // @[TLB.scala:170:77] wire _mpu_ppn_T_4; // @[TLB.scala:170:77] wire _mpu_ppn_T_3; // @[TLB.scala:170:77] wire _mpu_ppn_T_2; // @[TLB.scala:170:77] wire _mpu_ppn_T_1; // @[TLB.scala:170:77] assign _mpu_ppn_T_1 = _mpu_ppn_WIRE_1[0]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_fragmented_superpage = _mpu_ppn_T_1; // @[TLB.scala:170:77] assign _mpu_ppn_T_2 = _mpu_ppn_WIRE_1[1]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_c = _mpu_ppn_T_2; // @[TLB.scala:170:77] assign _mpu_ppn_T_3 = _mpu_ppn_WIRE_1[2]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_eff = _mpu_ppn_T_3; // @[TLB.scala:170:77] assign _mpu_ppn_T_4 = 
_mpu_ppn_WIRE_1[3]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_paa = _mpu_ppn_T_4; // @[TLB.scala:170:77] assign _mpu_ppn_T_5 = _mpu_ppn_WIRE_1[4]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_pal = _mpu_ppn_T_5; // @[TLB.scala:170:77] assign _mpu_ppn_T_6 = _mpu_ppn_WIRE_1[5]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_ppp = _mpu_ppn_T_6; // @[TLB.scala:170:77] assign _mpu_ppn_T_7 = _mpu_ppn_WIRE_1[6]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_pr = _mpu_ppn_T_7; // @[TLB.scala:170:77] assign _mpu_ppn_T_8 = _mpu_ppn_WIRE_1[7]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_px = _mpu_ppn_T_8; // @[TLB.scala:170:77] assign _mpu_ppn_T_9 = _mpu_ppn_WIRE_1[8]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_pw = _mpu_ppn_T_9; // @[TLB.scala:170:77] assign _mpu_ppn_T_10 = _mpu_ppn_WIRE_1[9]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_hr = _mpu_ppn_T_10; // @[TLB.scala:170:77] assign _mpu_ppn_T_11 = _mpu_ppn_WIRE_1[10]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_hx = _mpu_ppn_T_11; // @[TLB.scala:170:77] assign _mpu_ppn_T_12 = _mpu_ppn_WIRE_1[11]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_hw = _mpu_ppn_T_12; // @[TLB.scala:170:77] assign _mpu_ppn_T_13 = _mpu_ppn_WIRE_1[12]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_sr = _mpu_ppn_T_13; // @[TLB.scala:170:77] assign _mpu_ppn_T_14 = _mpu_ppn_WIRE_1[13]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_sx = _mpu_ppn_T_14; // @[TLB.scala:170:77] assign _mpu_ppn_T_15 = _mpu_ppn_WIRE_1[14]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_sw = _mpu_ppn_T_15; // @[TLB.scala:170:77] assign _mpu_ppn_T_16 = _mpu_ppn_WIRE_1[15]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_gf = _mpu_ppn_T_16; // @[TLB.scala:170:77] assign _mpu_ppn_T_17 = _mpu_ppn_WIRE_1[16]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_pf = _mpu_ppn_T_17; // @[TLB.scala:170:77] assign _mpu_ppn_T_18 = _mpu_ppn_WIRE_1[17]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_ae_stage2 = _mpu_ppn_T_18; // @[TLB.scala:170:77] assign _mpu_ppn_T_19 = _mpu_ppn_WIRE_1[18]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_ae_final = _mpu_ppn_T_19; // @[TLB.scala:170:77] assign _mpu_ppn_T_20 = _mpu_ppn_WIRE_1[19]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_ae_ptw = _mpu_ppn_T_20; // @[TLB.scala:170:77] assign _mpu_ppn_T_21 = _mpu_ppn_WIRE_1[20]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_g = _mpu_ppn_T_21; // @[TLB.scala:170:77] assign _mpu_ppn_T_22 = _mpu_ppn_WIRE_1[21]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_u = _mpu_ppn_T_22; // @[TLB.scala:170:77] assign _mpu_ppn_T_23 = _mpu_ppn_WIRE_1[41:22]; // @[TLB.scala:170:77] wire [19:0] _mpu_ppn_WIRE_ppn = _mpu_ppn_T_23; // @[TLB.scala:170:77] wire [1:0] mpu_ppn_res = _mpu_ppn_barrier_io_y_ppn[19:18]; // @[package.scala:267:25] wire _GEN = special_entry_level == 2'h0; // @[TLB.scala:197:28, :346:56] wire _mpu_ppn_ignore_T; // @[TLB.scala:197:28] assign _mpu_ppn_ignore_T = _GEN; // @[TLB.scala:197:28] wire _hitsVec_ignore_T_4; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_4 = _GEN; // @[TLB.scala:182:28, :197:28] wire _ppn_ignore_T_2; // @[TLB.scala:197:28] assign _ppn_ignore_T_2 = _GEN; // @[TLB.scala:197:28] wire _ignore_T_4; // @[TLB.scala:182:28] assign _ignore_T_4 = _GEN; // @[TLB.scala:182:28, :197:28] wire mpu_ppn_ignore = _mpu_ppn_ignore_T; // @[TLB.scala:197:{28,34}] wire [26:0] _mpu_ppn_T_24 = mpu_ppn_ignore ? 
vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [26:0] _mpu_ppn_T_25 = {_mpu_ppn_T_24[26:20], _mpu_ppn_T_24[19:0] | _mpu_ppn_barrier_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _mpu_ppn_T_26 = _mpu_ppn_T_25[17:9]; // @[TLB.scala:198:{47,58}] wire [10:0] _mpu_ppn_T_27 = {mpu_ppn_res, _mpu_ppn_T_26}; // @[TLB.scala:195:26, :198:{18,58}] wire _mpu_ppn_ignore_T_1 = ~(special_entry_level[1]); // @[TLB.scala:197:28, :346:56] wire mpu_ppn_ignore_1 = _mpu_ppn_ignore_T_1; // @[TLB.scala:197:{28,34}] wire [26:0] _mpu_ppn_T_28 = mpu_ppn_ignore_1 ? vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [26:0] _mpu_ppn_T_29 = {_mpu_ppn_T_28[26:20], _mpu_ppn_T_28[19:0] | _mpu_ppn_barrier_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _mpu_ppn_T_30 = _mpu_ppn_T_29[8:0]; // @[TLB.scala:198:{47,58}] wire [19:0] _mpu_ppn_T_31 = {_mpu_ppn_T_27, _mpu_ppn_T_30}; // @[TLB.scala:198:{18,58}] wire [27:0] _mpu_ppn_T_32 = io_req_bits_vaddr_0[39:12]; // @[TLB.scala:318:7, :413:146] wire [27:0] _mpu_ppn_T_33 = _mpu_ppn_T ? {8'h0, _mpu_ppn_T_31} : _mpu_ppn_T_32; // @[TLB.scala:198:18, :413:{20,32,146}] wire [27:0] mpu_ppn = do_refill ? {8'h0, refill_ppn} : _mpu_ppn_T_33; // @[TLB.scala:406:44, :408:29, :412:20, :413:20] wire [11:0] _mpu_physaddr_T = io_req_bits_vaddr_0[11:0]; // @[TLB.scala:318:7, :414:52] wire [11:0] _io_resp_paddr_T = io_req_bits_vaddr_0[11:0]; // @[TLB.scala:318:7, :414:52, :652:46] wire [11:0] _io_resp_gpa_offset_T_1 = io_req_bits_vaddr_0[11:0]; // @[TLB.scala:318:7, :414:52, :658:82] wire [39:0] mpu_physaddr = {mpu_ppn, _mpu_physaddr_T}; // @[TLB.scala:412:20, :414:{25,52}] wire [39:0] _homogeneous_T = mpu_physaddr; // @[TLB.scala:414:25] wire [39:0] _homogeneous_T_67 = mpu_physaddr; // @[TLB.scala:414:25] wire [39:0] _deny_access_to_debug_T_1 = mpu_physaddr; // @[TLB.scala:414:25] wire _mpu_priv_T_1 = _mpu_priv_T; // @[TLB.scala:415:{38,52}] wire [2:0] _mpu_priv_T_2 = {io_ptw_status_debug_0, 2'h0}; // @[TLB.scala:318:7, :415:103] wire [2:0] mpu_priv = _mpu_priv_T_1 ? 
3'h1 : _mpu_priv_T_2; // @[TLB.scala:415:{27,38,103}] wire cacheable; // @[TLB.scala:425:41] wire newEntry_c = cacheable; // @[TLB.scala:425:41, :449:24] wire [40:0] _homogeneous_T_1 = {1'h0, _homogeneous_T}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_2 = _homogeneous_T_1 & 41'h1FFFFFFE000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_3 = _homogeneous_T_2; // @[Parameters.scala:137:46] wire _homogeneous_T_4 = _homogeneous_T_3 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_50 = _homogeneous_T_4; // @[TLBPermissions.scala:101:65] wire [39:0] _GEN_0 = {mpu_physaddr[39:14], mpu_physaddr[13:0] ^ 14'h3000}; // @[TLB.scala:414:25] wire [39:0] _homogeneous_T_5; // @[Parameters.scala:137:31] assign _homogeneous_T_5 = _GEN_0; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_72; // @[Parameters.scala:137:31] assign _homogeneous_T_72 = _GEN_0; // @[Parameters.scala:137:31] wire [40:0] _homogeneous_T_6 = {1'h0, _homogeneous_T_5}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_7 = _homogeneous_T_6 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_8 = _homogeneous_T_7; // @[Parameters.scala:137:46] wire _homogeneous_T_9 = _homogeneous_T_8 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_1 = {mpu_physaddr[39:17], mpu_physaddr[16:0] ^ 17'h10000}; // @[TLB.scala:414:25] wire [39:0] _homogeneous_T_10; // @[Parameters.scala:137:31] assign _homogeneous_T_10 = _GEN_1; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_60; // @[Parameters.scala:137:31] assign _homogeneous_T_60 = _GEN_1; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_77; // @[Parameters.scala:137:31] assign _homogeneous_T_77 = _GEN_1; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_109; // @[Parameters.scala:137:31] assign _homogeneous_T_109 = _GEN_1; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_116; // @[Parameters.scala:137:31] assign _homogeneous_T_116 = _GEN_1; // @[Parameters.scala:137:31] wire [40:0] _homogeneous_T_11 = {1'h0, _homogeneous_T_10}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_12 = _homogeneous_T_11 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_13 = _homogeneous_T_12; // @[Parameters.scala:137:46] wire _homogeneous_T_14 = _homogeneous_T_13 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _homogeneous_T_15 = {mpu_physaddr[39:21], mpu_physaddr[20:0] ^ 21'h100000}; // @[TLB.scala:414:25] wire [40:0] _homogeneous_T_16 = {1'h0, _homogeneous_T_15}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_17 = _homogeneous_T_16 & 41'h1FFFFFEF000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_18 = _homogeneous_T_17; // @[Parameters.scala:137:46] wire _homogeneous_T_19 = _homogeneous_T_18 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _homogeneous_T_20 = {mpu_physaddr[39:26], mpu_physaddr[25:0] ^ 26'h2000000}; // @[TLB.scala:414:25] wire [40:0] _homogeneous_T_21 = {1'h0, _homogeneous_T_20}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_22 = _homogeneous_T_21 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_23 = _homogeneous_T_22; // @[Parameters.scala:137:46] wire _homogeneous_T_24 = _homogeneous_T_23 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _homogeneous_T_25 = {mpu_physaddr[39:26], mpu_physaddr[25:0] ^ 26'h2010000}; // @[TLB.scala:414:25] wire [40:0] _homogeneous_T_26 = {1'h0, _homogeneous_T_25}; // 
@[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_27 = _homogeneous_T_26 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_28 = _homogeneous_T_27; // @[Parameters.scala:137:46] wire _homogeneous_T_29 = _homogeneous_T_28 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_2 = {mpu_physaddr[39:28], mpu_physaddr[27:0] ^ 28'h8000000}; // @[TLB.scala:414:25] wire [39:0] _homogeneous_T_30; // @[Parameters.scala:137:31] assign _homogeneous_T_30 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_82; // @[Parameters.scala:137:31] assign _homogeneous_T_82 = _GEN_2; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_97; // @[Parameters.scala:137:31] assign _homogeneous_T_97 = _GEN_2; // @[Parameters.scala:137:31] wire [40:0] _homogeneous_T_31 = {1'h0, _homogeneous_T_30}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_32 = _homogeneous_T_31 & 41'h1FFFFFF0000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_33 = _homogeneous_T_32; // @[Parameters.scala:137:46] wire _homogeneous_T_34 = _homogeneous_T_33 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _homogeneous_T_35 = {mpu_physaddr[39:28], mpu_physaddr[27:0] ^ 28'hC000000}; // @[TLB.scala:414:25] wire [40:0] _homogeneous_T_36 = {1'h0, _homogeneous_T_35}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_37 = _homogeneous_T_36 & 41'h1FFFC000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_38 = _homogeneous_T_37; // @[Parameters.scala:137:46] wire _homogeneous_T_39 = _homogeneous_T_38 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _homogeneous_T_40 = {mpu_physaddr[39:29], mpu_physaddr[28:0] ^ 29'h10020000}; // @[TLB.scala:414:25] wire [40:0] _homogeneous_T_41 = {1'h0, _homogeneous_T_40}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_42 = _homogeneous_T_41 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_43 = _homogeneous_T_42; // @[Parameters.scala:137:46] wire _homogeneous_T_44 = _homogeneous_T_43 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [39:0] _GEN_3 = {mpu_physaddr[39:32], mpu_physaddr[31:0] ^ 32'h80000000}; // @[TLB.scala:414:25, :417:15] wire [39:0] _homogeneous_T_45; // @[Parameters.scala:137:31] assign _homogeneous_T_45 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_87; // @[Parameters.scala:137:31] assign _homogeneous_T_87 = _GEN_3; // @[Parameters.scala:137:31] wire [39:0] _homogeneous_T_102; // @[Parameters.scala:137:31] assign _homogeneous_T_102 = _GEN_3; // @[Parameters.scala:137:31] wire [40:0] _homogeneous_T_46 = {1'h0, _homogeneous_T_45}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_47 = _homogeneous_T_46 & 41'h1FFF0000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_48 = _homogeneous_T_47; // @[Parameters.scala:137:46] wire _homogeneous_T_49 = _homogeneous_T_48 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_51 = _homogeneous_T_50 | _homogeneous_T_9; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_52 = _homogeneous_T_51 | _homogeneous_T_14; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_53 = _homogeneous_T_52 | _homogeneous_T_19; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_54 = _homogeneous_T_53 | _homogeneous_T_24; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_55 = _homogeneous_T_54 | _homogeneous_T_29; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_56 = _homogeneous_T_55 | _homogeneous_T_34; // @[TLBPermissions.scala:101:65] 
wire _homogeneous_T_57 = _homogeneous_T_56 | _homogeneous_T_39; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_58 = _homogeneous_T_57 | _homogeneous_T_44; // @[TLBPermissions.scala:101:65] wire homogeneous = _homogeneous_T_58 | _homogeneous_T_49; // @[TLBPermissions.scala:101:65] wire [40:0] _homogeneous_T_61 = {1'h0, _homogeneous_T_60}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_62 = _homogeneous_T_61 & 41'h8A110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_63 = _homogeneous_T_62; // @[Parameters.scala:137:46] wire _homogeneous_T_64 = _homogeneous_T_63 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_65 = _homogeneous_T_64; // @[TLBPermissions.scala:87:66] wire _homogeneous_T_66 = ~_homogeneous_T_65; // @[TLBPermissions.scala:87:{22,66}] wire [40:0] _homogeneous_T_68 = {1'h0, _homogeneous_T_67}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_69 = _homogeneous_T_68 & 41'h9E113000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_70 = _homogeneous_T_69; // @[Parameters.scala:137:46] wire _homogeneous_T_71 = _homogeneous_T_70 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_92 = _homogeneous_T_71; // @[TLBPermissions.scala:85:66] wire [40:0] _homogeneous_T_73 = {1'h0, _homogeneous_T_72}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_74 = _homogeneous_T_73 & 41'h9E113000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_75 = _homogeneous_T_74; // @[Parameters.scala:137:46] wire _homogeneous_T_76 = _homogeneous_T_75 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _homogeneous_T_78 = {1'h0, _homogeneous_T_77}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_79 = _homogeneous_T_78 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_80 = _homogeneous_T_79; // @[Parameters.scala:137:46] wire _homogeneous_T_81 = _homogeneous_T_80 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _homogeneous_T_83 = {1'h0, _homogeneous_T_82}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_84 = _homogeneous_T_83 & 41'h9E110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_85 = _homogeneous_T_84; // @[Parameters.scala:137:46] wire _homogeneous_T_86 = _homogeneous_T_85 == 41'h0; // @[Parameters.scala:137:{46,59}] wire [40:0] _homogeneous_T_88 = {1'h0, _homogeneous_T_87}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_89 = _homogeneous_T_88 & 41'h90000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_90 = _homogeneous_T_89; // @[Parameters.scala:137:46] wire _homogeneous_T_91 = _homogeneous_T_90 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_93 = _homogeneous_T_92 | _homogeneous_T_76; // @[TLBPermissions.scala:85:66] wire _homogeneous_T_94 = _homogeneous_T_93 | _homogeneous_T_81; // @[TLBPermissions.scala:85:66] wire _homogeneous_T_95 = _homogeneous_T_94 | _homogeneous_T_86; // @[TLBPermissions.scala:85:66] wire _homogeneous_T_96 = _homogeneous_T_95 | _homogeneous_T_91; // @[TLBPermissions.scala:85:66] wire [40:0] _homogeneous_T_98 = {1'h0, _homogeneous_T_97}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_99 = _homogeneous_T_98 & 41'h8E000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_100 = _homogeneous_T_99; // @[Parameters.scala:137:46] wire _homogeneous_T_101 = _homogeneous_T_100 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_107 = _homogeneous_T_101; // @[TLBPermissions.scala:85:66] wire [40:0] 
_homogeneous_T_103 = {1'h0, _homogeneous_T_102}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_104 = _homogeneous_T_103 & 41'h80000000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_105 = _homogeneous_T_104; // @[Parameters.scala:137:46] wire _homogeneous_T_106 = _homogeneous_T_105 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_108 = _homogeneous_T_107 | _homogeneous_T_106; // @[TLBPermissions.scala:85:66] wire [40:0] _homogeneous_T_110 = {1'h0, _homogeneous_T_109}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_111 = _homogeneous_T_110 & 41'h8A110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_112 = _homogeneous_T_111; // @[Parameters.scala:137:46] wire _homogeneous_T_113 = _homogeneous_T_112 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_114 = _homogeneous_T_113; // @[TLBPermissions.scala:87:66] wire _homogeneous_T_115 = ~_homogeneous_T_114; // @[TLBPermissions.scala:87:{22,66}] wire [40:0] _homogeneous_T_117 = {1'h0, _homogeneous_T_116}; // @[Parameters.scala:137:{31,41}] wire [40:0] _homogeneous_T_118 = _homogeneous_T_117 & 41'h8A110000; // @[Parameters.scala:137:{41,46}] wire [40:0] _homogeneous_T_119 = _homogeneous_T_118; // @[Parameters.scala:137:46] wire _homogeneous_T_120 = _homogeneous_T_119 == 41'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_121 = _homogeneous_T_120; // @[TLBPermissions.scala:87:66] wire _homogeneous_T_122 = ~_homogeneous_T_121; // @[TLBPermissions.scala:87:{22,66}] wire _deny_access_to_debug_T = ~(mpu_priv[2]); // @[TLB.scala:415:27, :428:39] wire [40:0] _deny_access_to_debug_T_2 = {1'h0, _deny_access_to_debug_T_1}; // @[Parameters.scala:137:{31,41}] wire [40:0] _deny_access_to_debug_T_3 = _deny_access_to_debug_T_2 & 41'h1FFFFFFF000; // @[Parameters.scala:137:{41,46}] wire [40:0] _deny_access_to_debug_T_4 = _deny_access_to_debug_T_3; // @[Parameters.scala:137:46] wire _deny_access_to_debug_T_5 = _deny_access_to_debug_T_4 == 41'h0; // @[Parameters.scala:137:{46,59}] wire deny_access_to_debug = _deny_access_to_debug_T & _deny_access_to_debug_T_5; // @[TLB.scala:428:{39,50}] wire _prot_r_T = ~deny_access_to_debug; // @[TLB.scala:428:50, :429:33] wire _prot_r_T_1 = _pma_io_resp_r & _prot_r_T; // @[TLB.scala:422:19, :429:{30,33}] wire prot_r = _prot_r_T_1 & _pmp_io_r; // @[TLB.scala:416:19, :429:{30,55}] wire newEntry_pr = prot_r; // @[TLB.scala:429:55, :449:24] wire _prot_w_T = ~deny_access_to_debug; // @[TLB.scala:428:50, :429:33, :430:33] wire _prot_w_T_1 = _pma_io_resp_w & _prot_w_T; // @[TLB.scala:422:19, :430:{30,33}] wire prot_w = _prot_w_T_1 & _pmp_io_w; // @[TLB.scala:416:19, :430:{30,55}] wire newEntry_pw = prot_w; // @[TLB.scala:430:55, :449:24] wire _prot_x_T = ~deny_access_to_debug; // @[TLB.scala:428:50, :429:33, :434:33] wire _prot_x_T_1 = _pma_io_resp_x & _prot_x_T; // @[TLB.scala:422:19, :434:{30,33}] wire prot_x = _prot_x_T_1 & _pmp_io_x; // @[TLB.scala:416:19, :434:{30,55}] wire newEntry_px = prot_x; // @[TLB.scala:434:55, :449:24] wire [3:0][26:0] _GEN_4 = {{sectored_entries_3_0_tag_vpn}, {sectored_entries_2_0_tag_vpn}, {sectored_entries_1_0_tag_vpn}, {sectored_entries_0_0_tag_vpn}}; // @[TLB.scala:174:61, :339:29] wire [3:0] _GEN_5 = {{sectored_entries_3_0_tag_v}, {sectored_entries_2_0_tag_v}, {sectored_entries_1_0_tag_v}, {sectored_entries_0_0_tag_v}}; // @[TLB.scala:174:61, :339:29] wire [3:0][41:0] _GEN_6 = {{sectored_entries_3_0_data_0}, {sectored_entries_2_0_data_0}, {sectored_entries_1_0_data_0}, 
{sectored_entries_0_0_data_0}}; // @[TLB.scala:174:61, :339:29] wire [41:0] _entries_WIRE_1 = _GEN_6[memIdx]; // @[package.scala:163:13] wire [3:0] _GEN_7 = {{sectored_entries_3_0_valid_0}, {sectored_entries_2_0_valid_0}, {sectored_entries_1_0_valid_0}, {sectored_entries_0_0_valid_0}}; // @[TLB.scala:174:61, :339:29] wire [3:0][26:0] _GEN_8 = {{sectored_entries_3_1_tag_vpn}, {sectored_entries_2_1_tag_vpn}, {sectored_entries_1_1_tag_vpn}, {sectored_entries_0_1_tag_vpn}}; // @[TLB.scala:174:61, :339:29] wire [3:0] _GEN_9 = {{sectored_entries_3_1_tag_v}, {sectored_entries_2_1_tag_v}, {sectored_entries_1_1_tag_v}, {sectored_entries_0_1_tag_v}}; // @[TLB.scala:174:61, :339:29] wire [3:0][41:0] _GEN_10 = {{sectored_entries_3_1_data_0}, {sectored_entries_2_1_data_0}, {sectored_entries_1_1_data_0}, {sectored_entries_0_1_data_0}}; // @[TLB.scala:174:61, :339:29] wire [41:0] _entries_WIRE_3 = _GEN_10[memIdx]; // @[package.scala:163:13] wire [3:0] _GEN_11 = {{sectored_entries_3_1_valid_0}, {sectored_entries_2_1_valid_0}, {sectored_entries_1_1_valid_0}, {sectored_entries_0_1_valid_0}}; // @[TLB.scala:174:61, :339:29] wire [3:0][26:0] _GEN_12 = {{sectored_entries_3_2_tag_vpn}, {sectored_entries_2_2_tag_vpn}, {sectored_entries_1_2_tag_vpn}, {sectored_entries_0_2_tag_vpn}}; // @[TLB.scala:174:61, :339:29] wire [3:0] _GEN_13 = {{sectored_entries_3_2_tag_v}, {sectored_entries_2_2_tag_v}, {sectored_entries_1_2_tag_v}, {sectored_entries_0_2_tag_v}}; // @[TLB.scala:174:61, :339:29] wire [3:0][41:0] _GEN_14 = {{sectored_entries_3_2_data_0}, {sectored_entries_2_2_data_0}, {sectored_entries_1_2_data_0}, {sectored_entries_0_2_data_0}}; // @[TLB.scala:174:61, :339:29] wire [41:0] _entries_WIRE_5 = _GEN_14[memIdx]; // @[package.scala:163:13] wire [3:0] _GEN_15 = {{sectored_entries_3_2_valid_0}, {sectored_entries_2_2_valid_0}, {sectored_entries_1_2_valid_0}, {sectored_entries_0_2_valid_0}}; // @[TLB.scala:174:61, :339:29] wire [3:0][26:0] _GEN_16 = {{sectored_entries_3_3_tag_vpn}, {sectored_entries_2_3_tag_vpn}, {sectored_entries_1_3_tag_vpn}, {sectored_entries_0_3_tag_vpn}}; // @[TLB.scala:174:61, :339:29] wire [3:0] _GEN_17 = {{sectored_entries_3_3_tag_v}, {sectored_entries_2_3_tag_v}, {sectored_entries_1_3_tag_v}, {sectored_entries_0_3_tag_v}}; // @[TLB.scala:174:61, :339:29] wire [3:0][41:0] _GEN_18 = {{sectored_entries_3_3_data_0}, {sectored_entries_2_3_data_0}, {sectored_entries_1_3_data_0}, {sectored_entries_0_3_data_0}}; // @[TLB.scala:174:61, :339:29] wire [41:0] _entries_WIRE_7 = _GEN_18[memIdx]; // @[package.scala:163:13] wire [3:0] _GEN_19 = {{sectored_entries_3_3_valid_0}, {sectored_entries_2_3_valid_0}, {sectored_entries_1_3_valid_0}, {sectored_entries_0_3_valid_0}}; // @[TLB.scala:174:61, :339:29] wire [26:0] _GEN_20 = _GEN_4[memIdx] ^ vpn; // @[package.scala:163:13] wire [26:0] _sector_hits_T; // @[TLB.scala:174:61] assign _sector_hits_T = _GEN_20; // @[TLB.scala:174:61] wire [26:0] _hitsVec_T; // @[TLB.scala:174:61] assign _hitsVec_T = _GEN_20; // @[TLB.scala:174:61] wire [26:0] _sector_hits_T_1 = _sector_hits_T; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_2 = _sector_hits_T_1 == 27'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_3 = ~_GEN_5[memIdx]; // @[package.scala:163:13] wire _sector_hits_T_4 = _sector_hits_T_2 & _sector_hits_T_3; // @[TLB.scala:174:{86,95,105}] wire sector_hits_0 = _GEN_7[memIdx] & _sector_hits_T_4; // @[package.scala:163:13] wire [26:0] _GEN_21 = _GEN_8[memIdx] ^ vpn; // @[package.scala:163:13] wire [26:0] _sector_hits_T_5; // @[TLB.scala:174:61] assign 
_sector_hits_T_5 = _GEN_21; // @[TLB.scala:174:61] wire [26:0] _hitsVec_T_6; // @[TLB.scala:174:61] assign _hitsVec_T_6 = _GEN_21; // @[TLB.scala:174:61] wire [26:0] _sector_hits_T_6 = _sector_hits_T_5; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_7 = _sector_hits_T_6 == 27'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_8 = ~_GEN_9[memIdx]; // @[package.scala:163:13] wire _sector_hits_T_9 = _sector_hits_T_7 & _sector_hits_T_8; // @[TLB.scala:174:{86,95,105}] wire sector_hits_1 = _GEN_11[memIdx] & _sector_hits_T_9; // @[package.scala:163:13] wire [26:0] _GEN_22 = _GEN_12[memIdx] ^ vpn; // @[package.scala:163:13] wire [26:0] _sector_hits_T_10; // @[TLB.scala:174:61] assign _sector_hits_T_10 = _GEN_22; // @[TLB.scala:174:61] wire [26:0] _hitsVec_T_12; // @[TLB.scala:174:61] assign _hitsVec_T_12 = _GEN_22; // @[TLB.scala:174:61] wire [26:0] _sector_hits_T_11 = _sector_hits_T_10; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_12 = _sector_hits_T_11 == 27'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_13 = ~_GEN_13[memIdx]; // @[package.scala:163:13] wire _sector_hits_T_14 = _sector_hits_T_12 & _sector_hits_T_13; // @[TLB.scala:174:{86,95,105}] wire sector_hits_2 = _GEN_15[memIdx] & _sector_hits_T_14; // @[package.scala:163:13] wire [26:0] _GEN_23 = _GEN_16[memIdx] ^ vpn; // @[package.scala:163:13] wire [26:0] _sector_hits_T_15; // @[TLB.scala:174:61] assign _sector_hits_T_15 = _GEN_23; // @[TLB.scala:174:61] wire [26:0] _hitsVec_T_18; // @[TLB.scala:174:61] assign _hitsVec_T_18 = _GEN_23; // @[TLB.scala:174:61] wire [26:0] _sector_hits_T_16 = _sector_hits_T_15; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_17 = _sector_hits_T_16 == 27'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_18 = ~_GEN_17[memIdx]; // @[package.scala:163:13] wire _sector_hits_T_19 = _sector_hits_T_17 & _sector_hits_T_18; // @[TLB.scala:174:{86,95,105}] wire sector_hits_3 = _GEN_19[memIdx] & _sector_hits_T_19; // @[package.scala:163:13] wire _superpage_hits_tagMatch_T = ~superpage_entries_0_tag_v; // @[TLB.scala:178:43, :341:30] wire superpage_hits_tagMatch = superpage_entries_0_valid_0 & _superpage_hits_tagMatch_T; // @[TLB.scala:178:{33,43}, :341:30] wire [26:0] _T_1876 = superpage_entries_0_tag_vpn ^ vpn; // @[TLB.scala:183:52, :335:30, :341:30] wire [26:0] _superpage_hits_T; // @[TLB.scala:183:52] assign _superpage_hits_T = _T_1876; // @[TLB.scala:183:52] wire [26:0] _superpage_hits_T_5; // @[TLB.scala:183:52] assign _superpage_hits_T_5 = _T_1876; // @[TLB.scala:183:52] wire [26:0] _superpage_hits_T_10; // @[TLB.scala:183:52] assign _superpage_hits_T_10 = _T_1876; // @[TLB.scala:183:52] wire [26:0] _hitsVec_T_24; // @[TLB.scala:183:52] assign _hitsVec_T_24 = _T_1876; // @[TLB.scala:183:52] wire [26:0] _hitsVec_T_29; // @[TLB.scala:183:52] assign _hitsVec_T_29 = _T_1876; // @[TLB.scala:183:52] wire [26:0] _hitsVec_T_34; // @[TLB.scala:183:52] assign _hitsVec_T_34 = _T_1876; // @[TLB.scala:183:52] wire [8:0] _superpage_hits_T_1 = _superpage_hits_T[26:18]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_2 = _superpage_hits_T_1 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire _superpage_hits_T_3 = _superpage_hits_T_2; // @[TLB.scala:183:{40,79}] wire _superpage_hits_T_4 = superpage_hits_tagMatch & _superpage_hits_T_3; // @[TLB.scala:178:33, :183:{29,40}] wire _GEN_24 = superpage_entries_0_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_1; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_1 = _GEN_24; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_1; // 
@[TLB.scala:182:28] assign _hitsVec_ignore_T_1 = _GEN_24; // @[TLB.scala:182:28] wire _ppn_ignore_T; // @[TLB.scala:197:28] assign _ppn_ignore_T = _GEN_24; // @[TLB.scala:182:28, :197:28] wire _ignore_T_1; // @[TLB.scala:182:28] assign _ignore_T_1 = _GEN_24; // @[TLB.scala:182:28] wire superpage_hits_ignore_1 = _superpage_hits_ignore_T_1; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_6 = _superpage_hits_T_5[17:9]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_7 = _superpage_hits_T_6 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire _superpage_hits_T_8 = superpage_hits_ignore_1 | _superpage_hits_T_7; // @[TLB.scala:182:34, :183:{40,79}] wire _superpage_hits_T_9 = _superpage_hits_T_4 & _superpage_hits_T_8; // @[TLB.scala:183:{29,40}] wire superpage_hits_0 = _superpage_hits_T_9; // @[TLB.scala:183:29] wire _superpage_hits_ignore_T_2 = ~(superpage_entries_0_level[1]); // @[TLB.scala:182:28, :341:30] wire [8:0] _superpage_hits_T_11 = _superpage_hits_T_10[8:0]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_12 = _superpage_hits_T_11 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire [26:0] _hitsVec_T_1 = _hitsVec_T; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_2 = _hitsVec_T_1 == 27'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_3 = ~_GEN_5[memIdx]; // @[package.scala:163:13] wire _hitsVec_T_4 = _hitsVec_T_2 & _hitsVec_T_3; // @[TLB.scala:174:{86,95,105}] wire _hitsVec_T_5 = _GEN_7[memIdx] & _hitsVec_T_4; // @[package.scala:163:13] wire hitsVec_0 = vm_enabled & _hitsVec_T_5; // @[TLB.scala:188:18, :399:61, :440:44] wire [26:0] _hitsVec_T_7 = _hitsVec_T_6; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_8 = _hitsVec_T_7 == 27'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_9 = ~_GEN_9[memIdx]; // @[package.scala:163:13] wire _hitsVec_T_10 = _hitsVec_T_8 & _hitsVec_T_9; // @[TLB.scala:174:{86,95,105}] wire _hitsVec_T_11 = _GEN_11[memIdx] & _hitsVec_T_10; // @[package.scala:163:13] wire hitsVec_1 = vm_enabled & _hitsVec_T_11; // @[TLB.scala:188:18, :399:61, :440:44] wire [26:0] _hitsVec_T_13 = _hitsVec_T_12; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_14 = _hitsVec_T_13 == 27'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_15 = ~_GEN_13[memIdx]; // @[package.scala:163:13] wire _hitsVec_T_16 = _hitsVec_T_14 & _hitsVec_T_15; // @[TLB.scala:174:{86,95,105}] wire _hitsVec_T_17 = _GEN_15[memIdx] & _hitsVec_T_16; // @[package.scala:163:13] wire hitsVec_2 = vm_enabled & _hitsVec_T_17; // @[TLB.scala:188:18, :399:61, :440:44] wire [26:0] _hitsVec_T_19 = _hitsVec_T_18; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_20 = _hitsVec_T_19 == 27'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_21 = ~_GEN_17[memIdx]; // @[package.scala:163:13] wire _hitsVec_T_22 = _hitsVec_T_20 & _hitsVec_T_21; // @[TLB.scala:174:{86,95,105}] wire _hitsVec_T_23 = _GEN_19[memIdx] & _hitsVec_T_22; // @[package.scala:163:13] wire hitsVec_3 = vm_enabled & _hitsVec_T_23; // @[TLB.scala:188:18, :399:61, :440:44] wire _hitsVec_tagMatch_T = ~superpage_entries_0_tag_v; // @[TLB.scala:178:43, :341:30] wire hitsVec_tagMatch = superpage_entries_0_valid_0 & _hitsVec_tagMatch_T; // @[TLB.scala:178:{33,43}, :341:30] wire [8:0] _hitsVec_T_25 = _hitsVec_T_24[26:18]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_26 = _hitsVec_T_25 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire _hitsVec_T_27 = _hitsVec_T_26; // @[TLB.scala:183:{40,79}] wire _hitsVec_T_28 = hitsVec_tagMatch & _hitsVec_T_27; // @[TLB.scala:178:33, :183:{29,40}] wire hitsVec_ignore_1 = _hitsVec_ignore_T_1; // @[TLB.scala:182:{28,34}] wire [8:0] 
_hitsVec_T_30 = _hitsVec_T_29[17:9]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_31 = _hitsVec_T_30 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire _hitsVec_T_32 = hitsVec_ignore_1 | _hitsVec_T_31; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_33 = _hitsVec_T_28 & _hitsVec_T_32; // @[TLB.scala:183:{29,40}] wire _hitsVec_T_38 = _hitsVec_T_33; // @[TLB.scala:183:29] wire _hitsVec_ignore_T_2 = ~(superpage_entries_0_level[1]); // @[TLB.scala:182:28, :341:30] wire [8:0] _hitsVec_T_35 = _hitsVec_T_34[8:0]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_36 = _hitsVec_T_35 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire hitsVec_4 = vm_enabled & _hitsVec_T_38; // @[TLB.scala:183:29, :399:61, :440:44] wire _hitsVec_tagMatch_T_1 = ~special_entry_tag_v; // @[TLB.scala:178:43, :346:56] wire hitsVec_tagMatch_1 = special_entry_valid_0 & _hitsVec_tagMatch_T_1; // @[TLB.scala:178:{33,43}, :346:56] wire [26:0] _T_1974 = special_entry_tag_vpn ^ vpn; // @[TLB.scala:183:52, :335:30, :346:56] wire [26:0] _hitsVec_T_39; // @[TLB.scala:183:52] assign _hitsVec_T_39 = _T_1974; // @[TLB.scala:183:52] wire [26:0] _hitsVec_T_44; // @[TLB.scala:183:52] assign _hitsVec_T_44 = _T_1974; // @[TLB.scala:183:52] wire [26:0] _hitsVec_T_49; // @[TLB.scala:183:52] assign _hitsVec_T_49 = _T_1974; // @[TLB.scala:183:52] wire [8:0] _hitsVec_T_40 = _hitsVec_T_39[26:18]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_41 = _hitsVec_T_40 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire _hitsVec_T_42 = _hitsVec_T_41; // @[TLB.scala:183:{40,79}] wire _hitsVec_T_43 = hitsVec_tagMatch_1 & _hitsVec_T_42; // @[TLB.scala:178:33, :183:{29,40}] wire hitsVec_ignore_4 = _hitsVec_ignore_T_4; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_45 = _hitsVec_T_44[17:9]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_46 = _hitsVec_T_45 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire _hitsVec_T_47 = hitsVec_ignore_4 | _hitsVec_T_46; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_48 = _hitsVec_T_43 & _hitsVec_T_47; // @[TLB.scala:183:{29,40}] wire _hitsVec_ignore_T_5 = ~(special_entry_level[1]); // @[TLB.scala:182:28, :197:28, :346:56] wire hitsVec_ignore_5 = _hitsVec_ignore_T_5; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_50 = _hitsVec_T_49[8:0]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_51 = _hitsVec_T_50 == 9'h0; // @[TLB.scala:183:{58,79}, :318:7, :320:14] wire _hitsVec_T_52 = hitsVec_ignore_5 | _hitsVec_T_51; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_53 = _hitsVec_T_48 & _hitsVec_T_52; // @[TLB.scala:183:{29,40}] wire hitsVec_5 = vm_enabled & _hitsVec_T_53; // @[TLB.scala:183:29, :399:61, :440:44] wire [1:0] real_hits_lo_hi = {hitsVec_2, hitsVec_1}; // @[package.scala:45:27] wire [2:0] real_hits_lo = {real_hits_lo_hi, hitsVec_0}; // @[package.scala:45:27] wire [1:0] real_hits_hi_hi = {hitsVec_5, hitsVec_4}; // @[package.scala:45:27] wire [2:0] real_hits_hi = {real_hits_hi_hi, hitsVec_3}; // @[package.scala:45:27] wire [5:0] real_hits = {real_hits_hi, real_hits_lo}; // @[package.scala:45:27] wire [5:0] _tlb_hit_T = real_hits; // @[package.scala:45:27] wire _hits_T = ~vm_enabled; // @[TLB.scala:399:61, :442:18] wire [6:0] hits = {_hits_T, real_hits}; // @[package.scala:45:27] wire _newEntry_g_T; // @[TLB.scala:453:25] wire _newEntry_sw_T_6; // @[PTW.scala:151:40] wire _newEntry_sx_T_5; // @[PTW.scala:153:35] wire _newEntry_sr_T_5; // @[PTW.scala:149:35] wire newEntry_g; // @[TLB.scala:449:24] wire newEntry_sw; // @[TLB.scala:449:24] wire newEntry_sx; // @[TLB.scala:449:24] wire 
newEntry_sr; // @[TLB.scala:449:24] wire newEntry_ppp; // @[TLB.scala:449:24] wire newEntry_pal; // @[TLB.scala:449:24] wire newEntry_paa; // @[TLB.scala:449:24] wire newEntry_eff; // @[TLB.scala:449:24] assign _newEntry_g_T = io_ptw_resp_bits_pte_g_0 & io_ptw_resp_bits_pte_v_0; // @[TLB.scala:318:7, :453:25] assign newEntry_g = _newEntry_g_T; // @[TLB.scala:449:24, :453:25] wire _newEntry_ae_stage2_T = io_ptw_resp_bits_ae_final_0 & io_ptw_resp_bits_gpa_is_pte_0; // @[TLB.scala:318:7, :456:53] wire _newEntry_sr_T = ~io_ptw_resp_bits_pte_w_0; // @[TLB.scala:318:7] wire _newEntry_sr_T_1 = io_ptw_resp_bits_pte_x_0 & _newEntry_sr_T; // @[TLB.scala:318:7] wire _newEntry_sr_T_2 = io_ptw_resp_bits_pte_r_0 | _newEntry_sr_T_1; // @[TLB.scala:318:7] wire _newEntry_sr_T_3 = io_ptw_resp_bits_pte_v_0 & _newEntry_sr_T_2; // @[TLB.scala:318:7] wire _newEntry_sr_T_4 = _newEntry_sr_T_3 & io_ptw_resp_bits_pte_a_0; // @[TLB.scala:318:7] assign _newEntry_sr_T_5 = _newEntry_sr_T_4 & io_ptw_resp_bits_pte_r_0; // @[TLB.scala:318:7] assign newEntry_sr = _newEntry_sr_T_5; // @[TLB.scala:449:24] wire _newEntry_sw_T = ~io_ptw_resp_bits_pte_w_0; // @[TLB.scala:318:7] wire _newEntry_sw_T_1 = io_ptw_resp_bits_pte_x_0 & _newEntry_sw_T; // @[TLB.scala:318:7] wire _newEntry_sw_T_2 = io_ptw_resp_bits_pte_r_0 | _newEntry_sw_T_1; // @[TLB.scala:318:7] wire _newEntry_sw_T_3 = io_ptw_resp_bits_pte_v_0 & _newEntry_sw_T_2; // @[TLB.scala:318:7] wire _newEntry_sw_T_4 = _newEntry_sw_T_3 & io_ptw_resp_bits_pte_a_0; // @[TLB.scala:318:7] wire _newEntry_sw_T_5 = _newEntry_sw_T_4 & io_ptw_resp_bits_pte_w_0; // @[TLB.scala:318:7] assign _newEntry_sw_T_6 = _newEntry_sw_T_5 & io_ptw_resp_bits_pte_d_0; // @[TLB.scala:318:7] assign newEntry_sw = _newEntry_sw_T_6; // @[TLB.scala:449:24] wire _newEntry_sx_T = ~io_ptw_resp_bits_pte_w_0; // @[TLB.scala:318:7] wire _newEntry_sx_T_1 = io_ptw_resp_bits_pte_x_0 & _newEntry_sx_T; // @[TLB.scala:318:7] wire _newEntry_sx_T_2 = io_ptw_resp_bits_pte_r_0 | _newEntry_sx_T_1; // @[TLB.scala:318:7] wire _newEntry_sx_T_3 = io_ptw_resp_bits_pte_v_0 & _newEntry_sx_T_2; // @[TLB.scala:318:7] wire _newEntry_sx_T_4 = _newEntry_sx_T_3 & io_ptw_resp_bits_pte_a_0; // @[TLB.scala:318:7] assign _newEntry_sx_T_5 = _newEntry_sx_T_4 & io_ptw_resp_bits_pte_x_0; // @[TLB.scala:318:7] assign newEntry_sx = _newEntry_sx_T_5; // @[TLB.scala:449:24] wire [1:0] _GEN_25 = {newEntry_c, 1'h0}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign special_entry_data_0_lo_lo_lo = _GEN_25; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_lo_lo_lo = _GEN_25; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_data_0_lo_lo_lo = _GEN_25; // @[TLB.scala:217:24] wire [1:0] sectored_entries_1_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_1_data_0_lo_lo_lo = _GEN_25; // @[TLB.scala:217:24] wire [1:0] sectored_entries_2_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_2_data_0_lo_lo_lo = _GEN_25; // @[TLB.scala:217:24] wire [1:0] sectored_entries_3_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_3_data_0_lo_lo_lo = _GEN_25; // @[TLB.scala:217:24] wire [1:0] _GEN_26 = {newEntry_pal, newEntry_paa}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign special_entry_data_0_lo_lo_hi_hi = _GEN_26; // @[TLB.scala:217:24] wire [1:0] 
superpage_entries_0_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_lo_lo_hi_hi = _GEN_26; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_data_0_lo_lo_hi_hi = _GEN_26; // @[TLB.scala:217:24] wire [1:0] sectored_entries_1_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_1_data_0_lo_lo_hi_hi = _GEN_26; // @[TLB.scala:217:24] wire [1:0] sectored_entries_2_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_2_data_0_lo_lo_hi_hi = _GEN_26; // @[TLB.scala:217:24] wire [1:0] sectored_entries_3_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_3_data_0_lo_lo_hi_hi = _GEN_26; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_lo_lo_hi = {special_entry_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] special_entry_data_0_lo_lo = {special_entry_data_0_lo_lo_hi, special_entry_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [1:0] _GEN_27 = {newEntry_px, newEntry_pr}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign special_entry_data_0_lo_hi_lo_hi = _GEN_27; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_lo_hi_lo_hi = _GEN_27; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_data_0_lo_hi_lo_hi = _GEN_27; // @[TLB.scala:217:24] wire [1:0] sectored_entries_1_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_1_data_0_lo_hi_lo_hi = _GEN_27; // @[TLB.scala:217:24] wire [1:0] sectored_entries_2_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_2_data_0_lo_hi_lo_hi = _GEN_27; // @[TLB.scala:217:24] wire [1:0] sectored_entries_3_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_3_data_0_lo_hi_lo_hi = _GEN_27; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_lo_hi_lo = {special_entry_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [1:0] _GEN_28 = {newEntry_hx, newEntry_hr}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign special_entry_data_0_lo_hi_hi_hi = _GEN_28; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_lo_hi_hi_hi = _GEN_28; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_data_0_lo_hi_hi_hi = _GEN_28; // @[TLB.scala:217:24] wire [1:0] sectored_entries_1_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_1_data_0_lo_hi_hi_hi = _GEN_28; // @[TLB.scala:217:24] wire [1:0] sectored_entries_2_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_2_data_0_lo_hi_hi_hi = _GEN_28; // @[TLB.scala:217:24] wire [1:0] sectored_entries_3_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_3_data_0_lo_hi_hi_hi = _GEN_28; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_lo_hi_hi = {special_entry_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] special_entry_data_0_lo_hi = {special_entry_data_0_lo_hi_hi, special_entry_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] special_entry_data_0_lo = {special_entry_data_0_lo_hi, special_entry_data_0_lo_lo}; // @[TLB.scala:217:24] wire [1:0] _GEN_29 = {newEntry_sx, newEntry_sr}; // @[TLB.scala:217:24, :449:24] wire 
[1:0] special_entry_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign special_entry_data_0_hi_lo_lo_hi = _GEN_29; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_hi_lo_lo_hi = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_data_0_hi_lo_lo_hi = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_1_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_1_data_0_hi_lo_lo_hi = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_2_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_2_data_0_hi_lo_lo_hi = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_3_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_3_data_0_hi_lo_lo_hi = _GEN_29; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_hi_lo_lo = {special_entry_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [1:0] _GEN_30 = {newEntry_pf, newEntry_gf}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign special_entry_data_0_hi_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_hi_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_data_0_hi_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_1_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_1_data_0_hi_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_2_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_2_data_0_hi_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_3_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_3_data_0_hi_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_hi_lo_hi = {special_entry_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] special_entry_data_0_hi_lo = {special_entry_data_0_hi_lo_hi, special_entry_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [1:0] _GEN_31 = {newEntry_ae_ptw, newEntry_ae_final}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign special_entry_data_0_hi_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_hi_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_data_0_hi_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_1_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_1_data_0_hi_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_2_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_2_data_0_hi_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_3_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_3_data_0_hi_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_hi_hi_lo = {special_entry_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [20:0] _GEN_32 = {newEntry_ppn, newEntry_u}; // @[TLB.scala:217:24, :449:24] wire [20:0] special_entry_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign special_entry_data_0_hi_hi_hi_hi = 
_GEN_32; // @[TLB.scala:217:24] wire [20:0] superpage_entries_0_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_hi_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_data_0_hi_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [20:0] sectored_entries_1_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_1_data_0_hi_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [20:0] sectored_entries_2_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_2_data_0_hi_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [20:0] sectored_entries_3_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_3_data_0_hi_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [21:0] special_entry_data_0_hi_hi_hi = {special_entry_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] special_entry_data_0_hi_hi = {special_entry_data_0_hi_hi_hi, special_entry_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] special_entry_data_0_hi = {special_entry_data_0_hi_hi, special_entry_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _special_entry_data_0_T = {special_entry_data_0_hi, special_entry_data_0_lo}; // @[TLB.scala:217:24] wire _superpage_entries_0_level_T = io_ptw_resp_bits_level_0[0]; // @[package.scala:163:13] wire [2:0] superpage_entries_0_data_0_lo_lo_hi = {superpage_entries_0_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] superpage_entries_0_data_0_lo_lo = {superpage_entries_0_data_0_lo_lo_hi, superpage_entries_0_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_0_data_0_lo_hi_lo = {superpage_entries_0_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_0_data_0_lo_hi_hi = {superpage_entries_0_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_0_data_0_lo_hi = {superpage_entries_0_data_0_lo_hi_hi, superpage_entries_0_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] superpage_entries_0_data_0_lo = {superpage_entries_0_data_0_lo_hi, superpage_entries_0_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_0_data_0_hi_lo_lo = {superpage_entries_0_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_0_data_0_hi_lo_hi = {superpage_entries_0_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_0_data_0_hi_lo = {superpage_entries_0_data_0_hi_lo_hi, superpage_entries_0_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_0_data_0_hi_hi_lo = {superpage_entries_0_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] superpage_entries_0_data_0_hi_hi_hi = {superpage_entries_0_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] superpage_entries_0_data_0_hi_hi = {superpage_entries_0_data_0_hi_hi_hi, superpage_entries_0_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] superpage_entries_0_data_0_hi = {superpage_entries_0_data_0_hi_hi, superpage_entries_0_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _superpage_entries_0_data_0_T = {superpage_entries_0_data_0_hi, superpage_entries_0_data_0_lo}; // @[TLB.scala:217:24] wire [1:0] r_memIdx = r_refill_tag[1:0]; // @[package.scala:163:13] wire [1:0] waddr_1 = r_sectored_hit_valid ? 
r_sectored_hit_bits : r_sectored_repl_addr; // @[TLB.scala:356:33, :357:27, :485:22] wire [2:0] sectored_entries_0_data_0_lo_lo_hi = {sectored_entries_0_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_data_0_lo_lo = {sectored_entries_0_data_0_lo_lo_hi, sectored_entries_0_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_data_0_lo_hi_lo = {sectored_entries_0_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_data_0_lo_hi_hi = {sectored_entries_0_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_data_0_lo_hi = {sectored_entries_0_data_0_lo_hi_hi, sectored_entries_0_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_data_0_lo = {sectored_entries_0_data_0_lo_hi, sectored_entries_0_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_data_0_hi_lo_lo = {sectored_entries_0_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_data_0_hi_lo_hi = {sectored_entries_0_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_data_0_hi_lo = {sectored_entries_0_data_0_hi_lo_hi, sectored_entries_0_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_data_0_hi_hi_lo = {sectored_entries_0_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_data_0_hi_hi_hi = {sectored_entries_0_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_data_0_hi_hi = {sectored_entries_0_data_0_hi_hi_hi, sectored_entries_0_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_data_0_hi = {sectored_entries_0_data_0_hi_hi, sectored_entries_0_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_data_0_T = {sectored_entries_0_data_0_hi, sectored_entries_0_data_0_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_1_data_0_lo_lo_hi = {sectored_entries_1_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_1_data_0_lo_lo = {sectored_entries_1_data_0_lo_lo_hi, sectored_entries_1_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_1_data_0_lo_hi_lo = {sectored_entries_1_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_1_data_0_lo_hi_hi = {sectored_entries_1_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_1_data_0_lo_hi = {sectored_entries_1_data_0_lo_hi_hi, sectored_entries_1_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_1_data_0_lo = {sectored_entries_1_data_0_lo_hi, sectored_entries_1_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_1_data_0_hi_lo_lo = {sectored_entries_1_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_1_data_0_hi_lo_hi = {sectored_entries_1_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_1_data_0_hi_lo = {sectored_entries_1_data_0_hi_lo_hi, sectored_entries_1_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_1_data_0_hi_hi_lo = {sectored_entries_1_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_1_data_0_hi_hi_hi = {sectored_entries_1_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_1_data_0_hi_hi = {sectored_entries_1_data_0_hi_hi_hi, 
sectored_entries_1_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_1_data_0_hi = {sectored_entries_1_data_0_hi_hi, sectored_entries_1_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_1_data_0_T = {sectored_entries_1_data_0_hi, sectored_entries_1_data_0_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_2_data_0_lo_lo_hi = {sectored_entries_2_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_2_data_0_lo_lo = {sectored_entries_2_data_0_lo_lo_hi, sectored_entries_2_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_2_data_0_lo_hi_lo = {sectored_entries_2_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_2_data_0_lo_hi_hi = {sectored_entries_2_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_2_data_0_lo_hi = {sectored_entries_2_data_0_lo_hi_hi, sectored_entries_2_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_2_data_0_lo = {sectored_entries_2_data_0_lo_hi, sectored_entries_2_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_2_data_0_hi_lo_lo = {sectored_entries_2_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_2_data_0_hi_lo_hi = {sectored_entries_2_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_2_data_0_hi_lo = {sectored_entries_2_data_0_hi_lo_hi, sectored_entries_2_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_2_data_0_hi_hi_lo = {sectored_entries_2_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_2_data_0_hi_hi_hi = {sectored_entries_2_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_2_data_0_hi_hi = {sectored_entries_2_data_0_hi_hi_hi, sectored_entries_2_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_2_data_0_hi = {sectored_entries_2_data_0_hi_hi, sectored_entries_2_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_2_data_0_T = {sectored_entries_2_data_0_hi, sectored_entries_2_data_0_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_3_data_0_lo_lo_hi = {sectored_entries_3_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_3_data_0_lo_lo = {sectored_entries_3_data_0_lo_lo_hi, sectored_entries_3_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_3_data_0_lo_hi_lo = {sectored_entries_3_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_3_data_0_lo_hi_hi = {sectored_entries_3_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_3_data_0_lo_hi = {sectored_entries_3_data_0_lo_hi_hi, sectored_entries_3_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_3_data_0_lo = {sectored_entries_3_data_0_lo_hi, sectored_entries_3_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_3_data_0_hi_lo_lo = {sectored_entries_3_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_3_data_0_hi_lo_hi = {sectored_entries_3_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_3_data_0_hi_lo = {sectored_entries_3_data_0_hi_lo_hi, sectored_entries_3_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_3_data_0_hi_hi_lo = {sectored_entries_3_data_0_hi_hi_lo_hi, 1'h0}; // 
@[TLB.scala:217:24] wire [21:0] sectored_entries_3_data_0_hi_hi_hi = {sectored_entries_3_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_3_data_0_hi_hi = {sectored_entries_3_data_0_hi_hi_hi, sectored_entries_3_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_3_data_0_hi = {sectored_entries_3_data_0_hi_hi, sectored_entries_3_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_3_data_0_T = {sectored_entries_3_data_0_hi, sectored_entries_3_data_0_lo}; // @[TLB.scala:217:24] wire [19:0] _entries_T_22; // @[TLB.scala:170:77] wire _entries_T_21; // @[TLB.scala:170:77] wire _entries_T_20; // @[TLB.scala:170:77] wire _entries_T_19; // @[TLB.scala:170:77] wire _entries_T_18; // @[TLB.scala:170:77] wire _entries_T_17; // @[TLB.scala:170:77] wire _entries_T_16; // @[TLB.scala:170:77] wire _entries_T_15; // @[TLB.scala:170:77] wire _entries_T_14; // @[TLB.scala:170:77] wire _entries_T_13; // @[TLB.scala:170:77] wire _entries_T_12; // @[TLB.scala:170:77] wire _entries_T_11; // @[TLB.scala:170:77] wire _entries_T_10; // @[TLB.scala:170:77] wire _entries_T_9; // @[TLB.scala:170:77] wire _entries_T_8; // @[TLB.scala:170:77] wire _entries_T_7; // @[TLB.scala:170:77] wire _entries_T_6; // @[TLB.scala:170:77] wire _entries_T_5; // @[TLB.scala:170:77] wire _entries_T_4; // @[TLB.scala:170:77] wire _entries_T_3; // @[TLB.scala:170:77] wire _entries_T_2; // @[TLB.scala:170:77] wire _entries_T_1; // @[TLB.scala:170:77] wire _entries_T; // @[TLB.scala:170:77] assign _entries_T = _entries_WIRE_1[0]; // @[TLB.scala:170:77] wire _entries_WIRE_fragmented_superpage = _entries_T; // @[TLB.scala:170:77] assign _entries_T_1 = _entries_WIRE_1[1]; // @[TLB.scala:170:77] wire _entries_WIRE_c = _entries_T_1; // @[TLB.scala:170:77] assign _entries_T_2 = _entries_WIRE_1[2]; // @[TLB.scala:170:77] wire _entries_WIRE_eff = _entries_T_2; // @[TLB.scala:170:77] assign _entries_T_3 = _entries_WIRE_1[3]; // @[TLB.scala:170:77] wire _entries_WIRE_paa = _entries_T_3; // @[TLB.scala:170:77] assign _entries_T_4 = _entries_WIRE_1[4]; // @[TLB.scala:170:77] wire _entries_WIRE_pal = _entries_T_4; // @[TLB.scala:170:77] assign _entries_T_5 = _entries_WIRE_1[5]; // @[TLB.scala:170:77] wire _entries_WIRE_ppp = _entries_T_5; // @[TLB.scala:170:77] assign _entries_T_6 = _entries_WIRE_1[6]; // @[TLB.scala:170:77] wire _entries_WIRE_pr = _entries_T_6; // @[TLB.scala:170:77] assign _entries_T_7 = _entries_WIRE_1[7]; // @[TLB.scala:170:77] wire _entries_WIRE_px = _entries_T_7; // @[TLB.scala:170:77] assign _entries_T_8 = _entries_WIRE_1[8]; // @[TLB.scala:170:77] wire _entries_WIRE_pw = _entries_T_8; // @[TLB.scala:170:77] assign _entries_T_9 = _entries_WIRE_1[9]; // @[TLB.scala:170:77] wire _entries_WIRE_hr = _entries_T_9; // @[TLB.scala:170:77] assign _entries_T_10 = _entries_WIRE_1[10]; // @[TLB.scala:170:77] wire _entries_WIRE_hx = _entries_T_10; // @[TLB.scala:170:77] assign _entries_T_11 = _entries_WIRE_1[11]; // @[TLB.scala:170:77] wire _entries_WIRE_hw = _entries_T_11; // @[TLB.scala:170:77] assign _entries_T_12 = _entries_WIRE_1[12]; // @[TLB.scala:170:77] wire _entries_WIRE_sr = _entries_T_12; // @[TLB.scala:170:77] assign _entries_T_13 = _entries_WIRE_1[13]; // @[TLB.scala:170:77] wire _entries_WIRE_sx = _entries_T_13; // @[TLB.scala:170:77] assign _entries_T_14 = _entries_WIRE_1[14]; // @[TLB.scala:170:77] wire _entries_WIRE_sw = _entries_T_14; // @[TLB.scala:170:77] assign _entries_T_15 = _entries_WIRE_1[15]; // @[TLB.scala:170:77] wire _entries_WIRE_gf 
= _entries_T_15; // @[TLB.scala:170:77] assign _entries_T_16 = _entries_WIRE_1[16]; // @[TLB.scala:170:77] wire _entries_WIRE_pf = _entries_T_16; // @[TLB.scala:170:77] assign _entries_T_17 = _entries_WIRE_1[17]; // @[TLB.scala:170:77] wire _entries_WIRE_ae_stage2 = _entries_T_17; // @[TLB.scala:170:77] assign _entries_T_18 = _entries_WIRE_1[18]; // @[TLB.scala:170:77] wire _entries_WIRE_ae_final = _entries_T_18; // @[TLB.scala:170:77] assign _entries_T_19 = _entries_WIRE_1[19]; // @[TLB.scala:170:77] wire _entries_WIRE_ae_ptw = _entries_T_19; // @[TLB.scala:170:77] assign _entries_T_20 = _entries_WIRE_1[20]; // @[TLB.scala:170:77] wire _entries_WIRE_g = _entries_T_20; // @[TLB.scala:170:77] assign _entries_T_21 = _entries_WIRE_1[21]; // @[TLB.scala:170:77] wire _entries_WIRE_u = _entries_T_21; // @[TLB.scala:170:77] assign _entries_T_22 = _entries_WIRE_1[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_ppn = _entries_T_22; // @[TLB.scala:170:77] wire [19:0] _entries_T_45; // @[TLB.scala:170:77] wire _entries_T_44; // @[TLB.scala:170:77] wire _entries_T_43; // @[TLB.scala:170:77] wire _entries_T_42; // @[TLB.scala:170:77] wire _entries_T_41; // @[TLB.scala:170:77] wire _entries_T_40; // @[TLB.scala:170:77] wire _entries_T_39; // @[TLB.scala:170:77] wire _entries_T_38; // @[TLB.scala:170:77] wire _entries_T_37; // @[TLB.scala:170:77] wire _entries_T_36; // @[TLB.scala:170:77] wire _entries_T_35; // @[TLB.scala:170:77] wire _entries_T_34; // @[TLB.scala:170:77] wire _entries_T_33; // @[TLB.scala:170:77] wire _entries_T_32; // @[TLB.scala:170:77] wire _entries_T_31; // @[TLB.scala:170:77] wire _entries_T_30; // @[TLB.scala:170:77] wire _entries_T_29; // @[TLB.scala:170:77] wire _entries_T_28; // @[TLB.scala:170:77] wire _entries_T_27; // @[TLB.scala:170:77] wire _entries_T_26; // @[TLB.scala:170:77] wire _entries_T_25; // @[TLB.scala:170:77] wire _entries_T_24; // @[TLB.scala:170:77] wire _entries_T_23; // @[TLB.scala:170:77] assign _entries_T_23 = _entries_WIRE_3[0]; // @[TLB.scala:170:77] wire _entries_WIRE_2_fragmented_superpage = _entries_T_23; // @[TLB.scala:170:77] assign _entries_T_24 = _entries_WIRE_3[1]; // @[TLB.scala:170:77] wire _entries_WIRE_2_c = _entries_T_24; // @[TLB.scala:170:77] assign _entries_T_25 = _entries_WIRE_3[2]; // @[TLB.scala:170:77] wire _entries_WIRE_2_eff = _entries_T_25; // @[TLB.scala:170:77] assign _entries_T_26 = _entries_WIRE_3[3]; // @[TLB.scala:170:77] wire _entries_WIRE_2_paa = _entries_T_26; // @[TLB.scala:170:77] assign _entries_T_27 = _entries_WIRE_3[4]; // @[TLB.scala:170:77] wire _entries_WIRE_2_pal = _entries_T_27; // @[TLB.scala:170:77] assign _entries_T_28 = _entries_WIRE_3[5]; // @[TLB.scala:170:77] wire _entries_WIRE_2_ppp = _entries_T_28; // @[TLB.scala:170:77] assign _entries_T_29 = _entries_WIRE_3[6]; // @[TLB.scala:170:77] wire _entries_WIRE_2_pr = _entries_T_29; // @[TLB.scala:170:77] assign _entries_T_30 = _entries_WIRE_3[7]; // @[TLB.scala:170:77] wire _entries_WIRE_2_px = _entries_T_30; // @[TLB.scala:170:77] assign _entries_T_31 = _entries_WIRE_3[8]; // @[TLB.scala:170:77] wire _entries_WIRE_2_pw = _entries_T_31; // @[TLB.scala:170:77] assign _entries_T_32 = _entries_WIRE_3[9]; // @[TLB.scala:170:77] wire _entries_WIRE_2_hr = _entries_T_32; // @[TLB.scala:170:77] assign _entries_T_33 = _entries_WIRE_3[10]; // @[TLB.scala:170:77] wire _entries_WIRE_2_hx = _entries_T_33; // @[TLB.scala:170:77] assign _entries_T_34 = _entries_WIRE_3[11]; // @[TLB.scala:170:77] wire _entries_WIRE_2_hw = _entries_T_34; // @[TLB.scala:170:77] assign 
_entries_T_35 = _entries_WIRE_3[12]; // @[TLB.scala:170:77] wire _entries_WIRE_2_sr = _entries_T_35; // @[TLB.scala:170:77] assign _entries_T_36 = _entries_WIRE_3[13]; // @[TLB.scala:170:77] wire _entries_WIRE_2_sx = _entries_T_36; // @[TLB.scala:170:77] assign _entries_T_37 = _entries_WIRE_3[14]; // @[TLB.scala:170:77] wire _entries_WIRE_2_sw = _entries_T_37; // @[TLB.scala:170:77] assign _entries_T_38 = _entries_WIRE_3[15]; // @[TLB.scala:170:77] wire _entries_WIRE_2_gf = _entries_T_38; // @[TLB.scala:170:77] assign _entries_T_39 = _entries_WIRE_3[16]; // @[TLB.scala:170:77] wire _entries_WIRE_2_pf = _entries_T_39; // @[TLB.scala:170:77] assign _entries_T_40 = _entries_WIRE_3[17]; // @[TLB.scala:170:77] wire _entries_WIRE_2_ae_stage2 = _entries_T_40; // @[TLB.scala:170:77] assign _entries_T_41 = _entries_WIRE_3[18]; // @[TLB.scala:170:77] wire _entries_WIRE_2_ae_final = _entries_T_41; // @[TLB.scala:170:77] assign _entries_T_42 = _entries_WIRE_3[19]; // @[TLB.scala:170:77] wire _entries_WIRE_2_ae_ptw = _entries_T_42; // @[TLB.scala:170:77] assign _entries_T_43 = _entries_WIRE_3[20]; // @[TLB.scala:170:77] wire _entries_WIRE_2_g = _entries_T_43; // @[TLB.scala:170:77] assign _entries_T_44 = _entries_WIRE_3[21]; // @[TLB.scala:170:77] wire _entries_WIRE_2_u = _entries_T_44; // @[TLB.scala:170:77] assign _entries_T_45 = _entries_WIRE_3[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_2_ppn = _entries_T_45; // @[TLB.scala:170:77] wire [19:0] _entries_T_68; // @[TLB.scala:170:77] wire _entries_T_67; // @[TLB.scala:170:77] wire _entries_T_66; // @[TLB.scala:170:77] wire _entries_T_65; // @[TLB.scala:170:77] wire _entries_T_64; // @[TLB.scala:170:77] wire _entries_T_63; // @[TLB.scala:170:77] wire _entries_T_62; // @[TLB.scala:170:77] wire _entries_T_61; // @[TLB.scala:170:77] wire _entries_T_60; // @[TLB.scala:170:77] wire _entries_T_59; // @[TLB.scala:170:77] wire _entries_T_58; // @[TLB.scala:170:77] wire _entries_T_57; // @[TLB.scala:170:77] wire _entries_T_56; // @[TLB.scala:170:77] wire _entries_T_55; // @[TLB.scala:170:77] wire _entries_T_54; // @[TLB.scala:170:77] wire _entries_T_53; // @[TLB.scala:170:77] wire _entries_T_52; // @[TLB.scala:170:77] wire _entries_T_51; // @[TLB.scala:170:77] wire _entries_T_50; // @[TLB.scala:170:77] wire _entries_T_49; // @[TLB.scala:170:77] wire _entries_T_48; // @[TLB.scala:170:77] wire _entries_T_47; // @[TLB.scala:170:77] wire _entries_T_46; // @[TLB.scala:170:77] assign _entries_T_46 = _entries_WIRE_5[0]; // @[TLB.scala:170:77] wire _entries_WIRE_4_fragmented_superpage = _entries_T_46; // @[TLB.scala:170:77] assign _entries_T_47 = _entries_WIRE_5[1]; // @[TLB.scala:170:77] wire _entries_WIRE_4_c = _entries_T_47; // @[TLB.scala:170:77] assign _entries_T_48 = _entries_WIRE_5[2]; // @[TLB.scala:170:77] wire _entries_WIRE_4_eff = _entries_T_48; // @[TLB.scala:170:77] assign _entries_T_49 = _entries_WIRE_5[3]; // @[TLB.scala:170:77] wire _entries_WIRE_4_paa = _entries_T_49; // @[TLB.scala:170:77] assign _entries_T_50 = _entries_WIRE_5[4]; // @[TLB.scala:170:77] wire _entries_WIRE_4_pal = _entries_T_50; // @[TLB.scala:170:77] assign _entries_T_51 = _entries_WIRE_5[5]; // @[TLB.scala:170:77] wire _entries_WIRE_4_ppp = _entries_T_51; // @[TLB.scala:170:77] assign _entries_T_52 = _entries_WIRE_5[6]; // @[TLB.scala:170:77] wire _entries_WIRE_4_pr = _entries_T_52; // @[TLB.scala:170:77] assign _entries_T_53 = _entries_WIRE_5[7]; // @[TLB.scala:170:77] wire _entries_WIRE_4_px = _entries_T_53; // @[TLB.scala:170:77] assign _entries_T_54 = 
_entries_WIRE_5[8]; // @[TLB.scala:170:77] wire _entries_WIRE_4_pw = _entries_T_54; // @[TLB.scala:170:77] assign _entries_T_55 = _entries_WIRE_5[9]; // @[TLB.scala:170:77] wire _entries_WIRE_4_hr = _entries_T_55; // @[TLB.scala:170:77] assign _entries_T_56 = _entries_WIRE_5[10]; // @[TLB.scala:170:77] wire _entries_WIRE_4_hx = _entries_T_56; // @[TLB.scala:170:77] assign _entries_T_57 = _entries_WIRE_5[11]; // @[TLB.scala:170:77] wire _entries_WIRE_4_hw = _entries_T_57; // @[TLB.scala:170:77] assign _entries_T_58 = _entries_WIRE_5[12]; // @[TLB.scala:170:77] wire _entries_WIRE_4_sr = _entries_T_58; // @[TLB.scala:170:77] assign _entries_T_59 = _entries_WIRE_5[13]; // @[TLB.scala:170:77] wire _entries_WIRE_4_sx = _entries_T_59; // @[TLB.scala:170:77] assign _entries_T_60 = _entries_WIRE_5[14]; // @[TLB.scala:170:77] wire _entries_WIRE_4_sw = _entries_T_60; // @[TLB.scala:170:77] assign _entries_T_61 = _entries_WIRE_5[15]; // @[TLB.scala:170:77] wire _entries_WIRE_4_gf = _entries_T_61; // @[TLB.scala:170:77] assign _entries_T_62 = _entries_WIRE_5[16]; // @[TLB.scala:170:77] wire _entries_WIRE_4_pf = _entries_T_62; // @[TLB.scala:170:77] assign _entries_T_63 = _entries_WIRE_5[17]; // @[TLB.scala:170:77] wire _entries_WIRE_4_ae_stage2 = _entries_T_63; // @[TLB.scala:170:77] assign _entries_T_64 = _entries_WIRE_5[18]; // @[TLB.scala:170:77] wire _entries_WIRE_4_ae_final = _entries_T_64; // @[TLB.scala:170:77] assign _entries_T_65 = _entries_WIRE_5[19]; // @[TLB.scala:170:77] wire _entries_WIRE_4_ae_ptw = _entries_T_65; // @[TLB.scala:170:77] assign _entries_T_66 = _entries_WIRE_5[20]; // @[TLB.scala:170:77] wire _entries_WIRE_4_g = _entries_T_66; // @[TLB.scala:170:77] assign _entries_T_67 = _entries_WIRE_5[21]; // @[TLB.scala:170:77] wire _entries_WIRE_4_u = _entries_T_67; // @[TLB.scala:170:77] assign _entries_T_68 = _entries_WIRE_5[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_4_ppn = _entries_T_68; // @[TLB.scala:170:77] wire [19:0] _entries_T_91; // @[TLB.scala:170:77] wire _entries_T_90; // @[TLB.scala:170:77] wire _entries_T_89; // @[TLB.scala:170:77] wire _entries_T_88; // @[TLB.scala:170:77] wire _entries_T_87; // @[TLB.scala:170:77] wire _entries_T_86; // @[TLB.scala:170:77] wire _entries_T_85; // @[TLB.scala:170:77] wire _entries_T_84; // @[TLB.scala:170:77] wire _entries_T_83; // @[TLB.scala:170:77] wire _entries_T_82; // @[TLB.scala:170:77] wire _entries_T_81; // @[TLB.scala:170:77] wire _entries_T_80; // @[TLB.scala:170:77] wire _entries_T_79; // @[TLB.scala:170:77] wire _entries_T_78; // @[TLB.scala:170:77] wire _entries_T_77; // @[TLB.scala:170:77] wire _entries_T_76; // @[TLB.scala:170:77] wire _entries_T_75; // @[TLB.scala:170:77] wire _entries_T_74; // @[TLB.scala:170:77] wire _entries_T_73; // @[TLB.scala:170:77] wire _entries_T_72; // @[TLB.scala:170:77] wire _entries_T_71; // @[TLB.scala:170:77] wire _entries_T_70; // @[TLB.scala:170:77] wire _entries_T_69; // @[TLB.scala:170:77] assign _entries_T_69 = _entries_WIRE_7[0]; // @[TLB.scala:170:77] wire _entries_WIRE_6_fragmented_superpage = _entries_T_69; // @[TLB.scala:170:77] assign _entries_T_70 = _entries_WIRE_7[1]; // @[TLB.scala:170:77] wire _entries_WIRE_6_c = _entries_T_70; // @[TLB.scala:170:77] assign _entries_T_71 = _entries_WIRE_7[2]; // @[TLB.scala:170:77] wire _entries_WIRE_6_eff = _entries_T_71; // @[TLB.scala:170:77] assign _entries_T_72 = _entries_WIRE_7[3]; // @[TLB.scala:170:77] wire _entries_WIRE_6_paa = _entries_T_72; // @[TLB.scala:170:77] assign _entries_T_73 = _entries_WIRE_7[4]; // 
@[TLB.scala:170:77] wire _entries_WIRE_6_pal = _entries_T_73; // @[TLB.scala:170:77] assign _entries_T_74 = _entries_WIRE_7[5]; // @[TLB.scala:170:77] wire _entries_WIRE_6_ppp = _entries_T_74; // @[TLB.scala:170:77] assign _entries_T_75 = _entries_WIRE_7[6]; // @[TLB.scala:170:77] wire _entries_WIRE_6_pr = _entries_T_75; // @[TLB.scala:170:77] assign _entries_T_76 = _entries_WIRE_7[7]; // @[TLB.scala:170:77] wire _entries_WIRE_6_px = _entries_T_76; // @[TLB.scala:170:77] assign _entries_T_77 = _entries_WIRE_7[8]; // @[TLB.scala:170:77] wire _entries_WIRE_6_pw = _entries_T_77; // @[TLB.scala:170:77] assign _entries_T_78 = _entries_WIRE_7[9]; // @[TLB.scala:170:77] wire _entries_WIRE_6_hr = _entries_T_78; // @[TLB.scala:170:77] assign _entries_T_79 = _entries_WIRE_7[10]; // @[TLB.scala:170:77] wire _entries_WIRE_6_hx = _entries_T_79; // @[TLB.scala:170:77] assign _entries_T_80 = _entries_WIRE_7[11]; // @[TLB.scala:170:77] wire _entries_WIRE_6_hw = _entries_T_80; // @[TLB.scala:170:77] assign _entries_T_81 = _entries_WIRE_7[12]; // @[TLB.scala:170:77] wire _entries_WIRE_6_sr = _entries_T_81; // @[TLB.scala:170:77] assign _entries_T_82 = _entries_WIRE_7[13]; // @[TLB.scala:170:77] wire _entries_WIRE_6_sx = _entries_T_82; // @[TLB.scala:170:77] assign _entries_T_83 = _entries_WIRE_7[14]; // @[TLB.scala:170:77] wire _entries_WIRE_6_sw = _entries_T_83; // @[TLB.scala:170:77] assign _entries_T_84 = _entries_WIRE_7[15]; // @[TLB.scala:170:77] wire _entries_WIRE_6_gf = _entries_T_84; // @[TLB.scala:170:77] assign _entries_T_85 = _entries_WIRE_7[16]; // @[TLB.scala:170:77] wire _entries_WIRE_6_pf = _entries_T_85; // @[TLB.scala:170:77] assign _entries_T_86 = _entries_WIRE_7[17]; // @[TLB.scala:170:77] wire _entries_WIRE_6_ae_stage2 = _entries_T_86; // @[TLB.scala:170:77] assign _entries_T_87 = _entries_WIRE_7[18]; // @[TLB.scala:170:77] wire _entries_WIRE_6_ae_final = _entries_T_87; // @[TLB.scala:170:77] assign _entries_T_88 = _entries_WIRE_7[19]; // @[TLB.scala:170:77] wire _entries_WIRE_6_ae_ptw = _entries_T_88; // @[TLB.scala:170:77] assign _entries_T_89 = _entries_WIRE_7[20]; // @[TLB.scala:170:77] wire _entries_WIRE_6_g = _entries_T_89; // @[TLB.scala:170:77] assign _entries_T_90 = _entries_WIRE_7[21]; // @[TLB.scala:170:77] wire _entries_WIRE_6_u = _entries_T_90; // @[TLB.scala:170:77] assign _entries_T_91 = _entries_WIRE_7[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_6_ppn = _entries_T_91; // @[TLB.scala:170:77] wire [19:0] _entries_T_114; // @[TLB.scala:170:77] wire _entries_T_113; // @[TLB.scala:170:77] wire _entries_T_112; // @[TLB.scala:170:77] wire _entries_T_111; // @[TLB.scala:170:77] wire _entries_T_110; // @[TLB.scala:170:77] wire _entries_T_109; // @[TLB.scala:170:77] wire _entries_T_108; // @[TLB.scala:170:77] wire _entries_T_107; // @[TLB.scala:170:77] wire _entries_T_106; // @[TLB.scala:170:77] wire _entries_T_105; // @[TLB.scala:170:77] wire _entries_T_104; // @[TLB.scala:170:77] wire _entries_T_103; // @[TLB.scala:170:77] wire _entries_T_102; // @[TLB.scala:170:77] wire _entries_T_101; // @[TLB.scala:170:77] wire _entries_T_100; // @[TLB.scala:170:77] wire _entries_T_99; // @[TLB.scala:170:77] wire _entries_T_98; // @[TLB.scala:170:77] wire _entries_T_97; // @[TLB.scala:170:77] wire _entries_T_96; // @[TLB.scala:170:77] wire _entries_T_95; // @[TLB.scala:170:77] wire _entries_T_94; // @[TLB.scala:170:77] wire _entries_T_93; // @[TLB.scala:170:77] wire _entries_T_92; // @[TLB.scala:170:77] assign _entries_T_92 = _entries_WIRE_9[0]; // @[TLB.scala:170:77] wire 
_entries_WIRE_8_fragmented_superpage = _entries_T_92; // @[TLB.scala:170:77] assign _entries_T_93 = _entries_WIRE_9[1]; // @[TLB.scala:170:77] wire _entries_WIRE_8_c = _entries_T_93; // @[TLB.scala:170:77] assign _entries_T_94 = _entries_WIRE_9[2]; // @[TLB.scala:170:77] wire _entries_WIRE_8_eff = _entries_T_94; // @[TLB.scala:170:77] assign _entries_T_95 = _entries_WIRE_9[3]; // @[TLB.scala:170:77] wire _entries_WIRE_8_paa = _entries_T_95; // @[TLB.scala:170:77] assign _entries_T_96 = _entries_WIRE_9[4]; // @[TLB.scala:170:77] wire _entries_WIRE_8_pal = _entries_T_96; // @[TLB.scala:170:77] assign _entries_T_97 = _entries_WIRE_9[5]; // @[TLB.scala:170:77] wire _entries_WIRE_8_ppp = _entries_T_97; // @[TLB.scala:170:77] assign _entries_T_98 = _entries_WIRE_9[6]; // @[TLB.scala:170:77] wire _entries_WIRE_8_pr = _entries_T_98; // @[TLB.scala:170:77] assign _entries_T_99 = _entries_WIRE_9[7]; // @[TLB.scala:170:77] wire _entries_WIRE_8_px = _entries_T_99; // @[TLB.scala:170:77] assign _entries_T_100 = _entries_WIRE_9[8]; // @[TLB.scala:170:77] wire _entries_WIRE_8_pw = _entries_T_100; // @[TLB.scala:170:77] assign _entries_T_101 = _entries_WIRE_9[9]; // @[TLB.scala:170:77] wire _entries_WIRE_8_hr = _entries_T_101; // @[TLB.scala:170:77] assign _entries_T_102 = _entries_WIRE_9[10]; // @[TLB.scala:170:77] wire _entries_WIRE_8_hx = _entries_T_102; // @[TLB.scala:170:77] assign _entries_T_103 = _entries_WIRE_9[11]; // @[TLB.scala:170:77] wire _entries_WIRE_8_hw = _entries_T_103; // @[TLB.scala:170:77] assign _entries_T_104 = _entries_WIRE_9[12]; // @[TLB.scala:170:77] wire _entries_WIRE_8_sr = _entries_T_104; // @[TLB.scala:170:77] assign _entries_T_105 = _entries_WIRE_9[13]; // @[TLB.scala:170:77] wire _entries_WIRE_8_sx = _entries_T_105; // @[TLB.scala:170:77] assign _entries_T_106 = _entries_WIRE_9[14]; // @[TLB.scala:170:77] wire _entries_WIRE_8_sw = _entries_T_106; // @[TLB.scala:170:77] assign _entries_T_107 = _entries_WIRE_9[15]; // @[TLB.scala:170:77] wire _entries_WIRE_8_gf = _entries_T_107; // @[TLB.scala:170:77] assign _entries_T_108 = _entries_WIRE_9[16]; // @[TLB.scala:170:77] wire _entries_WIRE_8_pf = _entries_T_108; // @[TLB.scala:170:77] assign _entries_T_109 = _entries_WIRE_9[17]; // @[TLB.scala:170:77] wire _entries_WIRE_8_ae_stage2 = _entries_T_109; // @[TLB.scala:170:77] assign _entries_T_110 = _entries_WIRE_9[18]; // @[TLB.scala:170:77] wire _entries_WIRE_8_ae_final = _entries_T_110; // @[TLB.scala:170:77] assign _entries_T_111 = _entries_WIRE_9[19]; // @[TLB.scala:170:77] wire _entries_WIRE_8_ae_ptw = _entries_T_111; // @[TLB.scala:170:77] assign _entries_T_112 = _entries_WIRE_9[20]; // @[TLB.scala:170:77] wire _entries_WIRE_8_g = _entries_T_112; // @[TLB.scala:170:77] assign _entries_T_113 = _entries_WIRE_9[21]; // @[TLB.scala:170:77] wire _entries_WIRE_8_u = _entries_T_113; // @[TLB.scala:170:77] assign _entries_T_114 = _entries_WIRE_9[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_8_ppn = _entries_T_114; // @[TLB.scala:170:77] wire [19:0] _entries_T_137; // @[TLB.scala:170:77] wire _entries_T_136; // @[TLB.scala:170:77] wire _entries_T_135; // @[TLB.scala:170:77] wire _entries_T_134; // @[TLB.scala:170:77] wire _entries_T_133; // @[TLB.scala:170:77] wire _entries_T_132; // @[TLB.scala:170:77] wire _entries_T_131; // @[TLB.scala:170:77] wire _entries_T_130; // @[TLB.scala:170:77] wire _entries_T_129; // @[TLB.scala:170:77] wire _entries_T_128; // @[TLB.scala:170:77] wire _entries_T_127; // @[TLB.scala:170:77] wire _entries_T_126; // @[TLB.scala:170:77] wire 
_entries_T_125; // @[TLB.scala:170:77] wire _entries_T_124; // @[TLB.scala:170:77] wire _entries_T_123; // @[TLB.scala:170:77] wire _entries_T_122; // @[TLB.scala:170:77] wire _entries_T_121; // @[TLB.scala:170:77] wire _entries_T_120; // @[TLB.scala:170:77] wire _entries_T_119; // @[TLB.scala:170:77] wire _entries_T_118; // @[TLB.scala:170:77] wire _entries_T_117; // @[TLB.scala:170:77] wire _entries_T_116; // @[TLB.scala:170:77] wire _entries_T_115; // @[TLB.scala:170:77] assign _entries_T_115 = _entries_WIRE_11[0]; // @[TLB.scala:170:77] wire _entries_WIRE_10_fragmented_superpage = _entries_T_115; // @[TLB.scala:170:77] assign _entries_T_116 = _entries_WIRE_11[1]; // @[TLB.scala:170:77] wire _entries_WIRE_10_c = _entries_T_116; // @[TLB.scala:170:77] assign _entries_T_117 = _entries_WIRE_11[2]; // @[TLB.scala:170:77] wire _entries_WIRE_10_eff = _entries_T_117; // @[TLB.scala:170:77] assign _entries_T_118 = _entries_WIRE_11[3]; // @[TLB.scala:170:77] wire _entries_WIRE_10_paa = _entries_T_118; // @[TLB.scala:170:77] assign _entries_T_119 = _entries_WIRE_11[4]; // @[TLB.scala:170:77] wire _entries_WIRE_10_pal = _entries_T_119; // @[TLB.scala:170:77] assign _entries_T_120 = _entries_WIRE_11[5]; // @[TLB.scala:170:77] wire _entries_WIRE_10_ppp = _entries_T_120; // @[TLB.scala:170:77] assign _entries_T_121 = _entries_WIRE_11[6]; // @[TLB.scala:170:77] wire _entries_WIRE_10_pr = _entries_T_121; // @[TLB.scala:170:77] assign _entries_T_122 = _entries_WIRE_11[7]; // @[TLB.scala:170:77] wire _entries_WIRE_10_px = _entries_T_122; // @[TLB.scala:170:77] assign _entries_T_123 = _entries_WIRE_11[8]; // @[TLB.scala:170:77] wire _entries_WIRE_10_pw = _entries_T_123; // @[TLB.scala:170:77] assign _entries_T_124 = _entries_WIRE_11[9]; // @[TLB.scala:170:77] wire _entries_WIRE_10_hr = _entries_T_124; // @[TLB.scala:170:77] assign _entries_T_125 = _entries_WIRE_11[10]; // @[TLB.scala:170:77] wire _entries_WIRE_10_hx = _entries_T_125; // @[TLB.scala:170:77] assign _entries_T_126 = _entries_WIRE_11[11]; // @[TLB.scala:170:77] wire _entries_WIRE_10_hw = _entries_T_126; // @[TLB.scala:170:77] assign _entries_T_127 = _entries_WIRE_11[12]; // @[TLB.scala:170:77] wire _entries_WIRE_10_sr = _entries_T_127; // @[TLB.scala:170:77] assign _entries_T_128 = _entries_WIRE_11[13]; // @[TLB.scala:170:77] wire _entries_WIRE_10_sx = _entries_T_128; // @[TLB.scala:170:77] assign _entries_T_129 = _entries_WIRE_11[14]; // @[TLB.scala:170:77] wire _entries_WIRE_10_sw = _entries_T_129; // @[TLB.scala:170:77] assign _entries_T_130 = _entries_WIRE_11[15]; // @[TLB.scala:170:77] wire _entries_WIRE_10_gf = _entries_T_130; // @[TLB.scala:170:77] assign _entries_T_131 = _entries_WIRE_11[16]; // @[TLB.scala:170:77] wire _entries_WIRE_10_pf = _entries_T_131; // @[TLB.scala:170:77] assign _entries_T_132 = _entries_WIRE_11[17]; // @[TLB.scala:170:77] wire _entries_WIRE_10_ae_stage2 = _entries_T_132; // @[TLB.scala:170:77] assign _entries_T_133 = _entries_WIRE_11[18]; // @[TLB.scala:170:77] wire _entries_WIRE_10_ae_final = _entries_T_133; // @[TLB.scala:170:77] assign _entries_T_134 = _entries_WIRE_11[19]; // @[TLB.scala:170:77] wire _entries_WIRE_10_ae_ptw = _entries_T_134; // @[TLB.scala:170:77] assign _entries_T_135 = _entries_WIRE_11[20]; // @[TLB.scala:170:77] wire _entries_WIRE_10_g = _entries_T_135; // @[TLB.scala:170:77] assign _entries_T_136 = _entries_WIRE_11[21]; // @[TLB.scala:170:77] wire _entries_WIRE_10_u = _entries_T_136; // @[TLB.scala:170:77] assign _entries_T_137 = _entries_WIRE_11[41:22]; // @[TLB.scala:170:77] wire 
[19:0] _entries_WIRE_10_ppn = _entries_T_137; // @[TLB.scala:170:77] wire _ppn_T = ~vm_enabled; // @[TLB.scala:399:61, :442:18, :502:30] wire [1:0] ppn_res = _entries_barrier_4_io_y_ppn[19:18]; // @[package.scala:267:25] wire ppn_ignore = _ppn_ignore_T; // @[TLB.scala:197:{28,34}] wire [26:0] _ppn_T_1 = ppn_ignore ? vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [26:0] _ppn_T_2 = {_ppn_T_1[26:20], _ppn_T_1[19:0] | _entries_barrier_4_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_3 = _ppn_T_2[17:9]; // @[TLB.scala:198:{47,58}] wire [10:0] _ppn_T_4 = {ppn_res, _ppn_T_3}; // @[TLB.scala:195:26, :198:{18,58}] wire _ppn_ignore_T_1 = ~(superpage_entries_0_level[1]); // @[TLB.scala:182:28, :197:28, :341:30] wire [26:0] _ppn_T_6 = {_ppn_T_5[26:20], _ppn_T_5[19:0] | _entries_barrier_4_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_7 = _ppn_T_6[8:0]; // @[TLB.scala:198:{47,58}] wire [19:0] _ppn_T_8 = {_ppn_T_4, _ppn_T_7}; // @[TLB.scala:198:{18,58}] wire [1:0] ppn_res_1 = _entries_barrier_5_io_y_ppn[19:18]; // @[package.scala:267:25] wire ppn_ignore_2 = _ppn_ignore_T_2; // @[TLB.scala:197:{28,34}] wire [26:0] _ppn_T_9 = ppn_ignore_2 ? vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [26:0] _ppn_T_10 = {_ppn_T_9[26:20], _ppn_T_9[19:0] | _entries_barrier_5_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_11 = _ppn_T_10[17:9]; // @[TLB.scala:198:{47,58}] wire [10:0] _ppn_T_12 = {ppn_res_1, _ppn_T_11}; // @[TLB.scala:195:26, :198:{18,58}] wire _ppn_ignore_T_3 = ~(special_entry_level[1]); // @[TLB.scala:197:28, :346:56] wire ppn_ignore_3 = _ppn_ignore_T_3; // @[TLB.scala:197:{28,34}] wire [26:0] _ppn_T_13 = ppn_ignore_3 ? vpn : 27'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [26:0] _ppn_T_14 = {_ppn_T_13[26:20], _ppn_T_13[19:0] | _entries_barrier_5_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_15 = _ppn_T_14[8:0]; // @[TLB.scala:198:{47,58}] wire [19:0] _ppn_T_16 = {_ppn_T_12, _ppn_T_15}; // @[TLB.scala:198:{18,58}] wire [19:0] _ppn_T_17 = vpn[19:0]; // @[TLB.scala:335:30, :502:125] wire [19:0] _ppn_T_18 = hitsVec_0 ? _entries_barrier_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_19 = hitsVec_1 ? _entries_barrier_1_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_20 = hitsVec_2 ? _entries_barrier_2_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_21 = hitsVec_3 ? _entries_barrier_3_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_22 = hitsVec_4 ? _ppn_T_8 : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_23 = hitsVec_5 ? _ppn_T_16 : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_24 = _ppn_T ? 
_ppn_T_17 : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_25 = _ppn_T_18 | _ppn_T_19; // @[Mux.scala:30:73] wire [19:0] _ppn_T_26 = _ppn_T_25 | _ppn_T_20; // @[Mux.scala:30:73] wire [19:0] _ppn_T_27 = _ppn_T_26 | _ppn_T_21; // @[Mux.scala:30:73] wire [19:0] _ppn_T_28 = _ppn_T_27 | _ppn_T_22; // @[Mux.scala:30:73] wire [19:0] _ppn_T_29 = _ppn_T_28 | _ppn_T_23; // @[Mux.scala:30:73] wire [19:0] _ppn_T_30 = _ppn_T_29 | _ppn_T_24; // @[Mux.scala:30:73] wire [19:0] ppn = _ppn_T_30; // @[Mux.scala:30:73] wire [1:0] ptw_ae_array_lo_hi = {_entries_barrier_2_io_y_ae_ptw, _entries_barrier_1_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_ae_array_lo = {ptw_ae_array_lo_hi, _entries_barrier_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_ae_array_hi_hi = {_entries_barrier_5_io_y_ae_ptw, _entries_barrier_4_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_ae_array_hi = {ptw_ae_array_hi_hi, _entries_barrier_3_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [5:0] _ptw_ae_array_T = {ptw_ae_array_hi, ptw_ae_array_lo}; // @[package.scala:45:27] wire [6:0] ptw_ae_array = {1'h0, _ptw_ae_array_T}; // @[package.scala:45:27] wire [1:0] final_ae_array_lo_hi = {_entries_barrier_2_io_y_ae_final, _entries_barrier_1_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [2:0] final_ae_array_lo = {final_ae_array_lo_hi, _entries_barrier_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [1:0] final_ae_array_hi_hi = {_entries_barrier_5_io_y_ae_final, _entries_barrier_4_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [2:0] final_ae_array_hi = {final_ae_array_hi_hi, _entries_barrier_3_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [5:0] _final_ae_array_T = {final_ae_array_hi, final_ae_array_lo}; // @[package.scala:45:27] wire [6:0] final_ae_array = {1'h0, _final_ae_array_T}; // @[package.scala:45:27] wire [1:0] ptw_pf_array_lo_hi = {_entries_barrier_2_io_y_pf, _entries_barrier_1_io_y_pf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_pf_array_lo = {ptw_pf_array_lo_hi, _entries_barrier_io_y_pf}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_pf_array_hi_hi = {_entries_barrier_5_io_y_pf, _entries_barrier_4_io_y_pf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_pf_array_hi = {ptw_pf_array_hi_hi, _entries_barrier_3_io_y_pf}; // @[package.scala:45:27, :267:25] wire [5:0] _ptw_pf_array_T = {ptw_pf_array_hi, ptw_pf_array_lo}; // @[package.scala:45:27] wire [6:0] ptw_pf_array = {1'h0, _ptw_pf_array_T}; // @[package.scala:45:27] wire [1:0] ptw_gf_array_lo_hi = {_entries_barrier_2_io_y_gf, _entries_barrier_1_io_y_gf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_gf_array_lo = {ptw_gf_array_lo_hi, _entries_barrier_io_y_gf}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_gf_array_hi_hi = {_entries_barrier_5_io_y_gf, _entries_barrier_4_io_y_gf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_gf_array_hi = {ptw_gf_array_hi_hi, _entries_barrier_3_io_y_gf}; // @[package.scala:45:27, :267:25] wire [5:0] _ptw_gf_array_T = {ptw_gf_array_hi, ptw_gf_array_lo}; // @[package.scala:45:27] wire [6:0] ptw_gf_array = {1'h0, _ptw_gf_array_T}; // @[package.scala:45:27] wire [6:0] _gf_ld_array_T_3 = ptw_gf_array; // @[TLB.scala:509:25, :600:82] wire [6:0] _gf_st_array_T_2 = ptw_gf_array; // @[TLB.scala:509:25, :601:63] wire [6:0] _gf_inst_array_T_1 = ptw_gf_array; // @[TLB.scala:509:25, :602:46] wire [1:0] _GEN_33 = {_entries_barrier_2_io_y_u, _entries_barrier_1_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] priv_rw_ok_lo_hi; // 
@[package.scala:45:27] assign priv_rw_ok_lo_hi = _GEN_33; // @[package.scala:45:27] wire [1:0] priv_rw_ok_lo_hi_1; // @[package.scala:45:27] assign priv_rw_ok_lo_hi_1 = _GEN_33; // @[package.scala:45:27] wire [1:0] priv_x_ok_lo_hi; // @[package.scala:45:27] assign priv_x_ok_lo_hi = _GEN_33; // @[package.scala:45:27] wire [1:0] priv_x_ok_lo_hi_1; // @[package.scala:45:27] assign priv_x_ok_lo_hi_1 = _GEN_33; // @[package.scala:45:27] wire [2:0] priv_rw_ok_lo = {priv_rw_ok_lo_hi, _entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_34 = {_entries_barrier_5_io_y_u, _entries_barrier_4_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] priv_rw_ok_hi_hi; // @[package.scala:45:27] assign priv_rw_ok_hi_hi = _GEN_34; // @[package.scala:45:27] wire [1:0] priv_rw_ok_hi_hi_1; // @[package.scala:45:27] assign priv_rw_ok_hi_hi_1 = _GEN_34; // @[package.scala:45:27] wire [1:0] priv_x_ok_hi_hi; // @[package.scala:45:27] assign priv_x_ok_hi_hi = _GEN_34; // @[package.scala:45:27] wire [1:0] priv_x_ok_hi_hi_1; // @[package.scala:45:27] assign priv_x_ok_hi_hi_1 = _GEN_34; // @[package.scala:45:27] wire [2:0] priv_rw_ok_hi = {priv_rw_ok_hi_hi, _entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25] wire [5:0] _priv_rw_ok_T_2 = {priv_rw_ok_hi, priv_rw_ok_lo}; // @[package.scala:45:27] wire [5:0] _priv_rw_ok_T_3 = _priv_rw_ok_T_2; // @[package.scala:45:27] wire [5:0] priv_rw_ok = _priv_rw_ok_T_3; // @[TLB.scala:513:{23,70}] wire [2:0] priv_rw_ok_lo_1 = {priv_rw_ok_lo_hi_1, _entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25] wire [2:0] priv_rw_ok_hi_1 = {priv_rw_ok_hi_hi_1, _entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25] wire [5:0] _priv_rw_ok_T_4 = {priv_rw_ok_hi_1, priv_rw_ok_lo_1}; // @[package.scala:45:27] wire [5:0] _priv_rw_ok_T_5 = ~_priv_rw_ok_T_4; // @[package.scala:45:27] wire [2:0] priv_x_ok_lo = {priv_x_ok_lo_hi, _entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25] wire [2:0] priv_x_ok_hi = {priv_x_ok_hi_hi, _entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25] wire [5:0] _priv_x_ok_T = {priv_x_ok_hi, priv_x_ok_lo}; // @[package.scala:45:27] wire [5:0] _priv_x_ok_T_1 = ~_priv_x_ok_T; // @[package.scala:45:27] wire [2:0] priv_x_ok_lo_1 = {priv_x_ok_lo_hi_1, _entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25] wire [2:0] priv_x_ok_hi_1 = {priv_x_ok_hi_hi_1, _entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25] wire [5:0] _priv_x_ok_T_2 = {priv_x_ok_hi_1, priv_x_ok_lo_1}; // @[package.scala:45:27] wire [5:0] priv_x_ok = _priv_x_ok_T_2; // @[package.scala:45:27] wire _stage1_bypass_T_1 = ~stage1_en; // @[TLB.scala:374:29, :517:83] wire [5:0] _stage1_bypass_T_2 = {6{_stage1_bypass_T_1}}; // @[TLB.scala:517:{68,83}] wire [1:0] stage1_bypass_lo_hi = {_entries_barrier_2_io_y_ae_stage2, _entries_barrier_1_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [2:0] stage1_bypass_lo = {stage1_bypass_lo_hi, _entries_barrier_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [1:0] stage1_bypass_hi_hi = {_entries_barrier_5_io_y_ae_stage2, _entries_barrier_4_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [2:0] stage1_bypass_hi = {stage1_bypass_hi_hi, _entries_barrier_3_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [5:0] _stage1_bypass_T_3 = {stage1_bypass_hi, stage1_bypass_lo}; // @[package.scala:45:27] wire [5:0] _stage1_bypass_T_4 = _stage1_bypass_T_2 | _stage1_bypass_T_3; // @[package.scala:45:27] wire [1:0] r_array_lo_hi = {_entries_barrier_2_io_y_sr, _entries_barrier_1_io_y_sr}; // 
@[package.scala:45:27, :267:25] wire [2:0] r_array_lo = {r_array_lo_hi, _entries_barrier_io_y_sr}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_hi_hi = {_entries_barrier_5_io_y_sr, _entries_barrier_4_io_y_sr}; // @[package.scala:45:27, :267:25] wire [2:0] r_array_hi = {r_array_hi_hi, _entries_barrier_3_io_y_sr}; // @[package.scala:45:27, :267:25] wire [5:0] _r_array_T = {r_array_hi, r_array_lo}; // @[package.scala:45:27] wire [1:0] _GEN_35 = {_entries_barrier_2_io_y_sx, _entries_barrier_1_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_lo_hi_1; // @[package.scala:45:27] assign r_array_lo_hi_1 = _GEN_35; // @[package.scala:45:27] wire [1:0] x_array_lo_hi; // @[package.scala:45:27] assign x_array_lo_hi = _GEN_35; // @[package.scala:45:27] wire [2:0] r_array_lo_1 = {r_array_lo_hi_1, _entries_barrier_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_36 = {_entries_barrier_5_io_y_sx, _entries_barrier_4_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_hi_hi_1; // @[package.scala:45:27] assign r_array_hi_hi_1 = _GEN_36; // @[package.scala:45:27] wire [1:0] x_array_hi_hi; // @[package.scala:45:27] assign x_array_hi_hi = _GEN_36; // @[package.scala:45:27] wire [2:0] r_array_hi_1 = {r_array_hi_hi_1, _entries_barrier_3_io_y_sx}; // @[package.scala:45:27, :267:25] wire [5:0] _r_array_T_1 = {r_array_hi_1, r_array_lo_1}; // @[package.scala:45:27] wire [5:0] _r_array_T_2 = mxr ? _r_array_T_1 : 6'h0; // @[package.scala:45:27] wire [5:0] _r_array_T_3 = _r_array_T | _r_array_T_2; // @[package.scala:45:27] wire [5:0] _r_array_T_4 = priv_rw_ok & _r_array_T_3; // @[TLB.scala:513:70, :520:{41,69}] wire [5:0] _r_array_T_5 = _r_array_T_4; // @[TLB.scala:520:{41,113}] wire [6:0] r_array = {1'h1, _r_array_T_5}; // @[TLB.scala:520:{20,113}] wire [6:0] _pf_ld_array_T = r_array; // @[TLB.scala:520:20, :597:41] wire [1:0] w_array_lo_hi = {_entries_barrier_2_io_y_sw, _entries_barrier_1_io_y_sw}; // @[package.scala:45:27, :267:25] wire [2:0] w_array_lo = {w_array_lo_hi, _entries_barrier_io_y_sw}; // @[package.scala:45:27, :267:25] wire [1:0] w_array_hi_hi = {_entries_barrier_5_io_y_sw, _entries_barrier_4_io_y_sw}; // @[package.scala:45:27, :267:25] wire [2:0] w_array_hi = {w_array_hi_hi, _entries_barrier_3_io_y_sw}; // @[package.scala:45:27, :267:25] wire [5:0] _w_array_T = {w_array_hi, w_array_lo}; // @[package.scala:45:27] wire [5:0] _w_array_T_1 = priv_rw_ok & _w_array_T; // @[package.scala:45:27] wire [5:0] _w_array_T_2 = _w_array_T_1; // @[TLB.scala:521:{41,69}] wire [6:0] w_array = {1'h1, _w_array_T_2}; // @[TLB.scala:521:{20,69}] wire [2:0] x_array_lo = {x_array_lo_hi, _entries_barrier_io_y_sx}; // @[package.scala:45:27, :267:25] wire [2:0] x_array_hi = {x_array_hi_hi, _entries_barrier_3_io_y_sx}; // @[package.scala:45:27, :267:25] wire [5:0] _x_array_T = {x_array_hi, x_array_lo}; // @[package.scala:45:27] wire [5:0] _x_array_T_1 = priv_x_ok & _x_array_T; // @[package.scala:45:27] wire [5:0] _x_array_T_2 = _x_array_T_1; // @[TLB.scala:522:{40,68}] wire [6:0] x_array = {1'h1, _x_array_T_2}; // @[TLB.scala:522:{20,68}] wire [1:0] hr_array_lo_hi = {_entries_barrier_2_io_y_hr, _entries_barrier_1_io_y_hr}; // @[package.scala:45:27, :267:25] wire [2:0] hr_array_lo = {hr_array_lo_hi, _entries_barrier_io_y_hr}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_hi_hi = {_entries_barrier_5_io_y_hr, _entries_barrier_4_io_y_hr}; // @[package.scala:45:27, :267:25] wire [2:0] hr_array_hi = {hr_array_hi_hi, _entries_barrier_3_io_y_hr}; // @[package.scala:45:27, :267:25] 
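// The wires below collapse per-entry permission and PMA attribute bits into one bit per TLB way:
// hypervisor read/write/execute (hr/hw/hx, with the read vector widened by mstatus.MXR to include hx),
// physical read/write/execute (pr/pw/px, masked off wherever the PTW or final access-exception bits are set),
// plus cacheability, put-partial, atomic-logical and atomic-arithmetic support reported by the PMA checker.
// These per-way vectors feed the misaligned, access-fault and page-fault arrays computed further down.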
wire [5:0] _hr_array_T = {hr_array_hi, hr_array_lo}; // @[package.scala:45:27] wire [1:0] _GEN_37 = {_entries_barrier_2_io_y_hx, _entries_barrier_1_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_lo_hi_1; // @[package.scala:45:27] assign hr_array_lo_hi_1 = _GEN_37; // @[package.scala:45:27] wire [1:0] hx_array_lo_hi; // @[package.scala:45:27] assign hx_array_lo_hi = _GEN_37; // @[package.scala:45:27] wire [2:0] hr_array_lo_1 = {hr_array_lo_hi_1, _entries_barrier_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_38 = {_entries_barrier_5_io_y_hx, _entries_barrier_4_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_hi_hi_1; // @[package.scala:45:27] assign hr_array_hi_hi_1 = _GEN_38; // @[package.scala:45:27] wire [1:0] hx_array_hi_hi; // @[package.scala:45:27] assign hx_array_hi_hi = _GEN_38; // @[package.scala:45:27] wire [2:0] hr_array_hi_1 = {hr_array_hi_hi_1, _entries_barrier_3_io_y_hx}; // @[package.scala:45:27, :267:25] wire [5:0] _hr_array_T_1 = {hr_array_hi_1, hr_array_lo_1}; // @[package.scala:45:27] wire [5:0] _hr_array_T_2 = io_ptw_status_mxr_0 ? _hr_array_T_1 : 6'h0; // @[package.scala:45:27] wire [5:0] _hr_array_T_3 = _hr_array_T | _hr_array_T_2; // @[package.scala:45:27] wire [1:0] hw_array_lo_hi = {_entries_barrier_2_io_y_hw, _entries_barrier_1_io_y_hw}; // @[package.scala:45:27, :267:25] wire [2:0] hw_array_lo = {hw_array_lo_hi, _entries_barrier_io_y_hw}; // @[package.scala:45:27, :267:25] wire [1:0] hw_array_hi_hi = {_entries_barrier_5_io_y_hw, _entries_barrier_4_io_y_hw}; // @[package.scala:45:27, :267:25] wire [2:0] hw_array_hi = {hw_array_hi_hi, _entries_barrier_3_io_y_hw}; // @[package.scala:45:27, :267:25] wire [5:0] _hw_array_T = {hw_array_hi, hw_array_lo}; // @[package.scala:45:27] wire [2:0] hx_array_lo = {hx_array_lo_hi, _entries_barrier_io_y_hx}; // @[package.scala:45:27, :267:25] wire [2:0] hx_array_hi = {hx_array_hi_hi, _entries_barrier_3_io_y_hx}; // @[package.scala:45:27, :267:25] wire [5:0] _hx_array_T = {hx_array_hi, hx_array_lo}; // @[package.scala:45:27] wire [1:0] _pr_array_T = {2{prot_r}}; // @[TLB.scala:429:55, :529:26] wire [1:0] pr_array_lo = {_entries_barrier_1_io_y_pr, _entries_barrier_io_y_pr}; // @[package.scala:45:27, :267:25] wire [1:0] pr_array_hi_hi = {_entries_barrier_4_io_y_pr, _entries_barrier_3_io_y_pr}; // @[package.scala:45:27, :267:25] wire [2:0] pr_array_hi = {pr_array_hi_hi, _entries_barrier_2_io_y_pr}; // @[package.scala:45:27, :267:25] wire [4:0] _pr_array_T_1 = {pr_array_hi, pr_array_lo}; // @[package.scala:45:27] wire [6:0] _pr_array_T_2 = {_pr_array_T, _pr_array_T_1}; // @[package.scala:45:27] wire [6:0] _GEN_39 = ptw_ae_array | final_ae_array; // @[TLB.scala:506:25, :507:27, :529:104] wire [6:0] _pr_array_T_3; // @[TLB.scala:529:104] assign _pr_array_T_3 = _GEN_39; // @[TLB.scala:529:104] wire [6:0] _pw_array_T_3; // @[TLB.scala:531:104] assign _pw_array_T_3 = _GEN_39; // @[TLB.scala:529:104, :531:104] wire [6:0] _px_array_T_3; // @[TLB.scala:533:104] assign _px_array_T_3 = _GEN_39; // @[TLB.scala:529:104, :533:104] wire [6:0] _pr_array_T_4 = ~_pr_array_T_3; // @[TLB.scala:529:{89,104}] wire [6:0] pr_array = _pr_array_T_2 & _pr_array_T_4; // @[TLB.scala:529:{21,87,89}] wire [1:0] _pw_array_T = {2{prot_w}}; // @[TLB.scala:430:55, :531:26] wire [1:0] pw_array_lo = {_entries_barrier_1_io_y_pw, _entries_barrier_io_y_pw}; // @[package.scala:45:27, :267:25] wire [1:0] pw_array_hi_hi = {_entries_barrier_4_io_y_pw, _entries_barrier_3_io_y_pw}; // @[package.scala:45:27, :267:25] wire [2:0] 
pw_array_hi = {pw_array_hi_hi, _entries_barrier_2_io_y_pw}; // @[package.scala:45:27, :267:25] wire [4:0] _pw_array_T_1 = {pw_array_hi, pw_array_lo}; // @[package.scala:45:27] wire [6:0] _pw_array_T_2 = {_pw_array_T, _pw_array_T_1}; // @[package.scala:45:27] wire [6:0] _pw_array_T_4 = ~_pw_array_T_3; // @[TLB.scala:531:{89,104}] wire [6:0] pw_array = _pw_array_T_2 & _pw_array_T_4; // @[TLB.scala:531:{21,87,89}] wire [1:0] _px_array_T = {2{prot_x}}; // @[TLB.scala:434:55, :533:26] wire [1:0] px_array_lo = {_entries_barrier_1_io_y_px, _entries_barrier_io_y_px}; // @[package.scala:45:27, :267:25] wire [1:0] px_array_hi_hi = {_entries_barrier_4_io_y_px, _entries_barrier_3_io_y_px}; // @[package.scala:45:27, :267:25] wire [2:0] px_array_hi = {px_array_hi_hi, _entries_barrier_2_io_y_px}; // @[package.scala:45:27, :267:25] wire [4:0] _px_array_T_1 = {px_array_hi, px_array_lo}; // @[package.scala:45:27] wire [6:0] _px_array_T_2 = {_px_array_T, _px_array_T_1}; // @[package.scala:45:27] wire [6:0] _px_array_T_4 = ~_px_array_T_3; // @[TLB.scala:533:{89,104}] wire [6:0] px_array = _px_array_T_2 & _px_array_T_4; // @[TLB.scala:533:{21,87,89}] wire [1:0] _eff_array_T = {2{_pma_io_resp_eff}}; // @[TLB.scala:422:19, :535:27] wire [1:0] eff_array_lo = {_entries_barrier_1_io_y_eff, _entries_barrier_io_y_eff}; // @[package.scala:45:27, :267:25] wire [1:0] eff_array_hi_hi = {_entries_barrier_4_io_y_eff, _entries_barrier_3_io_y_eff}; // @[package.scala:45:27, :267:25] wire [2:0] eff_array_hi = {eff_array_hi_hi, _entries_barrier_2_io_y_eff}; // @[package.scala:45:27, :267:25] wire [4:0] _eff_array_T_1 = {eff_array_hi, eff_array_lo}; // @[package.scala:45:27] wire [6:0] eff_array = {_eff_array_T, _eff_array_T_1}; // @[package.scala:45:27] wire [1:0] _c_array_T = {2{cacheable}}; // @[TLB.scala:425:41, :537:25] wire [1:0] _GEN_40 = {_entries_barrier_1_io_y_c, _entries_barrier_io_y_c}; // @[package.scala:45:27, :267:25] wire [1:0] c_array_lo; // @[package.scala:45:27] assign c_array_lo = _GEN_40; // @[package.scala:45:27] wire [1:0] prefetchable_array_lo; // @[package.scala:45:27] assign prefetchable_array_lo = _GEN_40; // @[package.scala:45:27] wire [1:0] _GEN_41 = {_entries_barrier_4_io_y_c, _entries_barrier_3_io_y_c}; // @[package.scala:45:27, :267:25] wire [1:0] c_array_hi_hi; // @[package.scala:45:27] assign c_array_hi_hi = _GEN_41; // @[package.scala:45:27] wire [1:0] prefetchable_array_hi_hi; // @[package.scala:45:27] assign prefetchable_array_hi_hi = _GEN_41; // @[package.scala:45:27] wire [2:0] c_array_hi = {c_array_hi_hi, _entries_barrier_2_io_y_c}; // @[package.scala:45:27, :267:25] wire [4:0] _c_array_T_1 = {c_array_hi, c_array_lo}; // @[package.scala:45:27] wire [6:0] c_array = {_c_array_T, _c_array_T_1}; // @[package.scala:45:27] wire [6:0] lrscAllowed = c_array; // @[TLB.scala:537:20, :580:24] wire [1:0] _ppp_array_T = {2{_pma_io_resp_pp}}; // @[TLB.scala:422:19, :539:27] wire [1:0] ppp_array_lo = {_entries_barrier_1_io_y_ppp, _entries_barrier_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [1:0] ppp_array_hi_hi = {_entries_barrier_4_io_y_ppp, _entries_barrier_3_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [2:0] ppp_array_hi = {ppp_array_hi_hi, _entries_barrier_2_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [4:0] _ppp_array_T_1 = {ppp_array_hi, ppp_array_lo}; // @[package.scala:45:27] wire [6:0] ppp_array = {_ppp_array_T, _ppp_array_T_1}; // @[package.scala:45:27] wire [1:0] _paa_array_T = {2{_pma_io_resp_aa}}; // @[TLB.scala:422:19, :541:27] wire [1:0] paa_array_lo = 
{_entries_barrier_1_io_y_paa, _entries_barrier_io_y_paa}; // @[package.scala:45:27, :267:25] wire [1:0] paa_array_hi_hi = {_entries_barrier_4_io_y_paa, _entries_barrier_3_io_y_paa}; // @[package.scala:45:27, :267:25] wire [2:0] paa_array_hi = {paa_array_hi_hi, _entries_barrier_2_io_y_paa}; // @[package.scala:45:27, :267:25] wire [4:0] _paa_array_T_1 = {paa_array_hi, paa_array_lo}; // @[package.scala:45:27] wire [6:0] paa_array = {_paa_array_T, _paa_array_T_1}; // @[package.scala:45:27] wire [1:0] _pal_array_T = {2{_pma_io_resp_al}}; // @[TLB.scala:422:19, :543:27] wire [1:0] pal_array_lo = {_entries_barrier_1_io_y_pal, _entries_barrier_io_y_pal}; // @[package.scala:45:27, :267:25] wire [1:0] pal_array_hi_hi = {_entries_barrier_4_io_y_pal, _entries_barrier_3_io_y_pal}; // @[package.scala:45:27, :267:25] wire [2:0] pal_array_hi = {pal_array_hi_hi, _entries_barrier_2_io_y_pal}; // @[package.scala:45:27, :267:25] wire [4:0] _pal_array_T_1 = {pal_array_hi, pal_array_lo}; // @[package.scala:45:27] wire [6:0] pal_array = {_pal_array_T, _pal_array_T_1}; // @[package.scala:45:27] wire [6:0] ppp_array_if_cached = ppp_array | c_array; // @[TLB.scala:537:20, :539:22, :544:39] wire [6:0] paa_array_if_cached = paa_array | c_array; // @[TLB.scala:537:20, :541:22, :545:39] wire [6:0] pal_array_if_cached = pal_array | c_array; // @[TLB.scala:537:20, :543:22, :546:39] wire _prefetchable_array_T = cacheable & homogeneous; // @[TLBPermissions.scala:101:65] wire [1:0] _prefetchable_array_T_1 = {_prefetchable_array_T, 1'h0}; // @[TLB.scala:547:{43,59}] wire [2:0] prefetchable_array_hi = {prefetchable_array_hi_hi, _entries_barrier_2_io_y_c}; // @[package.scala:45:27, :267:25] wire [4:0] _prefetchable_array_T_2 = {prefetchable_array_hi, prefetchable_array_lo}; // @[package.scala:45:27] wire [6:0] prefetchable_array = {_prefetchable_array_T_1, _prefetchable_array_T_2}; // @[package.scala:45:27] wire [3:0] _misaligned_T = 4'h1 << io_req_bits_size_0; // @[OneHot.scala:58:35] wire [4:0] _misaligned_T_1 = {1'h0, _misaligned_T} - 5'h1; // @[OneHot.scala:58:35] wire [3:0] _misaligned_T_2 = _misaligned_T_1[3:0]; // @[TLB.scala:550:69] wire [39:0] _misaligned_T_3 = {36'h0, io_req_bits_vaddr_0[3:0] & _misaligned_T_2}; // @[TLB.scala:318:7, :550:{39,69}] wire misaligned = |_misaligned_T_3; // @[TLB.scala:550:{39,77}] wire _bad_va_T = vm_enabled & stage1_en; // @[TLB.scala:374:29, :399:61, :568:21] wire [39:0] bad_va_maskedVAddr = io_req_bits_vaddr_0 & 40'hC000000000; // @[TLB.scala:318:7, :559:43] wire _bad_va_T_2 = bad_va_maskedVAddr == 40'h0; // @[TLB.scala:550:77, :559:43, :560:51] wire _bad_va_T_3 = bad_va_maskedVAddr == 40'hC000000000; // @[TLB.scala:559:43, :560:86] wire _bad_va_T_4 = _bad_va_T_3; // @[TLB.scala:560:{71,86}] wire _bad_va_T_5 = _bad_va_T_2 | _bad_va_T_4; // @[TLB.scala:560:{51,59,71}] wire _bad_va_T_6 = ~_bad_va_T_5; // @[TLB.scala:560:{37,59}] wire _bad_va_T_7 = _bad_va_T_6; // @[TLB.scala:560:{34,37}] wire bad_va = _bad_va_T & _bad_va_T_7; // @[TLB.scala:560:34, :568:{21,34}] wire _GEN_42 = io_req_bits_cmd_0 == 5'h6; // @[package.scala:16:47] wire _cmd_lrsc_T; // @[package.scala:16:47] assign _cmd_lrsc_T = _GEN_42; // @[package.scala:16:47] wire _cmd_read_T_2; // @[package.scala:16:47] assign _cmd_read_T_2 = _GEN_42; // @[package.scala:16:47] wire _GEN_43 = io_req_bits_cmd_0 == 5'h7; // @[package.scala:16:47] wire _cmd_lrsc_T_1; // @[package.scala:16:47] assign _cmd_lrsc_T_1 = _GEN_43; // @[package.scala:16:47] wire _cmd_read_T_3; // @[package.scala:16:47] assign _cmd_read_T_3 = _GEN_43; // 
@[package.scala:16:47] wire _cmd_write_T_3; // @[Consts.scala:90:66] assign _cmd_write_T_3 = _GEN_43; // @[package.scala:16:47] wire _cmd_lrsc_T_2 = _cmd_lrsc_T | _cmd_lrsc_T_1; // @[package.scala:16:47, :81:59] wire cmd_lrsc = _cmd_lrsc_T_2; // @[package.scala:81:59] wire _GEN_44 = io_req_bits_cmd_0 == 5'h4; // @[package.scala:16:47] wire _cmd_amo_logical_T; // @[package.scala:16:47] assign _cmd_amo_logical_T = _GEN_44; // @[package.scala:16:47] wire _cmd_read_T_7; // @[package.scala:16:47] assign _cmd_read_T_7 = _GEN_44; // @[package.scala:16:47] wire _cmd_write_T_5; // @[package.scala:16:47] assign _cmd_write_T_5 = _GEN_44; // @[package.scala:16:47] wire _GEN_45 = io_req_bits_cmd_0 == 5'h9; // @[package.scala:16:47] wire _cmd_amo_logical_T_1; // @[package.scala:16:47] assign _cmd_amo_logical_T_1 = _GEN_45; // @[package.scala:16:47] wire _cmd_read_T_8; // @[package.scala:16:47] assign _cmd_read_T_8 = _GEN_45; // @[package.scala:16:47] wire _cmd_write_T_6; // @[package.scala:16:47] assign _cmd_write_T_6 = _GEN_45; // @[package.scala:16:47] wire _GEN_46 = io_req_bits_cmd_0 == 5'hA; // @[package.scala:16:47] wire _cmd_amo_logical_T_2; // @[package.scala:16:47] assign _cmd_amo_logical_T_2 = _GEN_46; // @[package.scala:16:47] wire _cmd_read_T_9; // @[package.scala:16:47] assign _cmd_read_T_9 = _GEN_46; // @[package.scala:16:47] wire _cmd_write_T_7; // @[package.scala:16:47] assign _cmd_write_T_7 = _GEN_46; // @[package.scala:16:47] wire _GEN_47 = io_req_bits_cmd_0 == 5'hB; // @[package.scala:16:47] wire _cmd_amo_logical_T_3; // @[package.scala:16:47] assign _cmd_amo_logical_T_3 = _GEN_47; // @[package.scala:16:47] wire _cmd_read_T_10; // @[package.scala:16:47] assign _cmd_read_T_10 = _GEN_47; // @[package.scala:16:47] wire _cmd_write_T_8; // @[package.scala:16:47] assign _cmd_write_T_8 = _GEN_47; // @[package.scala:16:47] wire _cmd_amo_logical_T_4 = _cmd_amo_logical_T | _cmd_amo_logical_T_1; // @[package.scala:16:47, :81:59] wire _cmd_amo_logical_T_5 = _cmd_amo_logical_T_4 | _cmd_amo_logical_T_2; // @[package.scala:16:47, :81:59] wire _cmd_amo_logical_T_6 = _cmd_amo_logical_T_5 | _cmd_amo_logical_T_3; // @[package.scala:16:47, :81:59] wire cmd_amo_logical = _cmd_amo_logical_T_6; // @[package.scala:81:59] wire _GEN_48 = io_req_bits_cmd_0 == 5'h8; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T = _GEN_48; // @[package.scala:16:47] wire _cmd_read_T_14; // @[package.scala:16:47] assign _cmd_read_T_14 = _GEN_48; // @[package.scala:16:47] wire _cmd_write_T_12; // @[package.scala:16:47] assign _cmd_write_T_12 = _GEN_48; // @[package.scala:16:47] wire _GEN_49 = io_req_bits_cmd_0 == 5'hC; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_1; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T_1 = _GEN_49; // @[package.scala:16:47] wire _cmd_read_T_15; // @[package.scala:16:47] assign _cmd_read_T_15 = _GEN_49; // @[package.scala:16:47] wire _cmd_write_T_13; // @[package.scala:16:47] assign _cmd_write_T_13 = _GEN_49; // @[package.scala:16:47] wire _GEN_50 = io_req_bits_cmd_0 == 5'hD; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_2; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T_2 = _GEN_50; // @[package.scala:16:47] wire _cmd_read_T_16; // @[package.scala:16:47] assign _cmd_read_T_16 = _GEN_50; // @[package.scala:16:47] wire _cmd_write_T_14; // @[package.scala:16:47] assign _cmd_write_T_14 = _GEN_50; // @[package.scala:16:47] wire _GEN_51 = io_req_bits_cmd_0 == 5'hE; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_3; 
// @[package.scala:16:47] assign _cmd_amo_arithmetic_T_3 = _GEN_51; // @[package.scala:16:47] wire _cmd_read_T_17; // @[package.scala:16:47] assign _cmd_read_T_17 = _GEN_51; // @[package.scala:16:47] wire _cmd_write_T_15; // @[package.scala:16:47] assign _cmd_write_T_15 = _GEN_51; // @[package.scala:16:47] wire _GEN_52 = io_req_bits_cmd_0 == 5'hF; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_4; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T_4 = _GEN_52; // @[package.scala:16:47] wire _cmd_read_T_18; // @[package.scala:16:47] assign _cmd_read_T_18 = _GEN_52; // @[package.scala:16:47] wire _cmd_write_T_16; // @[package.scala:16:47] assign _cmd_write_T_16 = _GEN_52; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_5 = _cmd_amo_arithmetic_T | _cmd_amo_arithmetic_T_1; // @[package.scala:16:47, :81:59] wire _cmd_amo_arithmetic_T_6 = _cmd_amo_arithmetic_T_5 | _cmd_amo_arithmetic_T_2; // @[package.scala:16:47, :81:59] wire _cmd_amo_arithmetic_T_7 = _cmd_amo_arithmetic_T_6 | _cmd_amo_arithmetic_T_3; // @[package.scala:16:47, :81:59] wire _cmd_amo_arithmetic_T_8 = _cmd_amo_arithmetic_T_7 | _cmd_amo_arithmetic_T_4; // @[package.scala:16:47, :81:59] wire cmd_amo_arithmetic = _cmd_amo_arithmetic_T_8; // @[package.scala:81:59] wire _GEN_53 = io_req_bits_cmd_0 == 5'h11; // @[TLB.scala:318:7, :573:41] wire cmd_put_partial; // @[TLB.scala:573:41] assign cmd_put_partial = _GEN_53; // @[TLB.scala:573:41] wire _cmd_write_T_1; // @[Consts.scala:90:49] assign _cmd_write_T_1 = _GEN_53; // @[TLB.scala:573:41] wire _cmd_read_T = io_req_bits_cmd_0 == 5'h0; // @[package.scala:16:47] wire _GEN_54 = io_req_bits_cmd_0 == 5'h10; // @[package.scala:16:47] wire _cmd_read_T_1; // @[package.scala:16:47] assign _cmd_read_T_1 = _GEN_54; // @[package.scala:16:47] wire _cmd_readx_T; // @[TLB.scala:575:56] assign _cmd_readx_T = _GEN_54; // @[package.scala:16:47] wire _cmd_read_T_4 = _cmd_read_T | _cmd_read_T_1; // @[package.scala:16:47, :81:59] wire _cmd_read_T_5 = _cmd_read_T_4 | _cmd_read_T_2; // @[package.scala:16:47, :81:59] wire _cmd_read_T_6 = _cmd_read_T_5 | _cmd_read_T_3; // @[package.scala:16:47, :81:59] wire _cmd_read_T_11 = _cmd_read_T_7 | _cmd_read_T_8; // @[package.scala:16:47, :81:59] wire _cmd_read_T_12 = _cmd_read_T_11 | _cmd_read_T_9; // @[package.scala:16:47, :81:59] wire _cmd_read_T_13 = _cmd_read_T_12 | _cmd_read_T_10; // @[package.scala:16:47, :81:59] wire _cmd_read_T_19 = _cmd_read_T_14 | _cmd_read_T_15; // @[package.scala:16:47, :81:59] wire _cmd_read_T_20 = _cmd_read_T_19 | _cmd_read_T_16; // @[package.scala:16:47, :81:59] wire _cmd_read_T_21 = _cmd_read_T_20 | _cmd_read_T_17; // @[package.scala:16:47, :81:59] wire _cmd_read_T_22 = _cmd_read_T_21 | _cmd_read_T_18; // @[package.scala:16:47, :81:59] wire _cmd_read_T_23 = _cmd_read_T_13 | _cmd_read_T_22; // @[package.scala:81:59] wire cmd_read = _cmd_read_T_6 | _cmd_read_T_23; // @[package.scala:81:59] wire _cmd_write_T = io_req_bits_cmd_0 == 5'h1; // @[TLB.scala:318:7] wire _cmd_write_T_2 = _cmd_write_T | _cmd_write_T_1; // @[Consts.scala:90:{32,42,49}] wire _cmd_write_T_4 = _cmd_write_T_2 | _cmd_write_T_3; // @[Consts.scala:90:{42,59,66}] wire _cmd_write_T_9 = _cmd_write_T_5 | _cmd_write_T_6; // @[package.scala:16:47, :81:59] wire _cmd_write_T_10 = _cmd_write_T_9 | _cmd_write_T_7; // @[package.scala:16:47, :81:59] wire _cmd_write_T_11 = _cmd_write_T_10 | _cmd_write_T_8; // @[package.scala:16:47, :81:59] wire _cmd_write_T_17 = _cmd_write_T_12 | _cmd_write_T_13; // @[package.scala:16:47, :81:59] wire _cmd_write_T_18 = _cmd_write_T_17 | 
_cmd_write_T_14; // @[package.scala:16:47, :81:59] wire _cmd_write_T_19 = _cmd_write_T_18 | _cmd_write_T_15; // @[package.scala:16:47, :81:59] wire _cmd_write_T_20 = _cmd_write_T_19 | _cmd_write_T_16; // @[package.scala:16:47, :81:59] wire _cmd_write_T_21 = _cmd_write_T_11 | _cmd_write_T_20; // @[package.scala:81:59] wire cmd_write = _cmd_write_T_4 | _cmd_write_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _cmd_write_perms_T = io_req_bits_cmd_0 == 5'h5; // @[package.scala:16:47] wire _cmd_write_perms_T_1 = io_req_bits_cmd_0 == 5'h17; // @[package.scala:16:47] wire _cmd_write_perms_T_2 = _cmd_write_perms_T | _cmd_write_perms_T_1; // @[package.scala:16:47, :81:59] wire cmd_write_perms = cmd_write | _cmd_write_perms_T_2; // @[package.scala:81:59] wire [6:0] _ae_array_T = misaligned ? eff_array : 7'h0; // @[TLB.scala:535:22, :550:77, :582:8] wire [6:0] _ae_array_T_1 = ~lrscAllowed; // @[TLB.scala:580:24, :583:19] wire [6:0] _ae_array_T_2 = cmd_lrsc ? _ae_array_T_1 : 7'h0; // @[TLB.scala:570:33, :583:{8,19}] wire [6:0] ae_array = _ae_array_T | _ae_array_T_2; // @[TLB.scala:582:{8,37}, :583:8] wire [6:0] _ae_ld_array_T = ~pr_array; // @[TLB.scala:529:87, :586:46] wire [6:0] _ae_ld_array_T_1 = ae_array | _ae_ld_array_T; // @[TLB.scala:582:37, :586:{44,46}] wire [6:0] ae_ld_array = cmd_read ? _ae_ld_array_T_1 : 7'h0; // @[TLB.scala:586:{24,44}] wire [6:0] _ae_st_array_T = ~pw_array; // @[TLB.scala:531:87, :588:37] wire [6:0] _ae_st_array_T_1 = ae_array | _ae_st_array_T; // @[TLB.scala:582:37, :588:{35,37}] wire [6:0] _ae_st_array_T_2 = cmd_write_perms ? _ae_st_array_T_1 : 7'h0; // @[TLB.scala:577:35, :588:{8,35}] wire [6:0] _ae_st_array_T_3 = ~ppp_array_if_cached; // @[TLB.scala:544:39, :589:26] wire [6:0] _ae_st_array_T_4 = cmd_put_partial ? _ae_st_array_T_3 : 7'h0; // @[TLB.scala:573:41, :589:{8,26}] wire [6:0] _ae_st_array_T_5 = _ae_st_array_T_2 | _ae_st_array_T_4; // @[TLB.scala:588:{8,53}, :589:8] wire [6:0] _ae_st_array_T_6 = ~pal_array_if_cached; // @[TLB.scala:546:39, :590:26] wire [6:0] _ae_st_array_T_7 = cmd_amo_logical ? _ae_st_array_T_6 : 7'h0; // @[TLB.scala:571:40, :590:{8,26}] wire [6:0] _ae_st_array_T_8 = _ae_st_array_T_5 | _ae_st_array_T_7; // @[TLB.scala:588:53, :589:53, :590:8] wire [6:0] _ae_st_array_T_9 = ~paa_array_if_cached; // @[TLB.scala:545:39, :591:29] wire [6:0] _ae_st_array_T_10 = cmd_amo_arithmetic ? _ae_st_array_T_9 : 7'h0; // @[TLB.scala:572:43, :591:{8,29}] wire [6:0] ae_st_array = _ae_st_array_T_8 | _ae_st_array_T_10; // @[TLB.scala:589:53, :590:53, :591:8] wire [6:0] _must_alloc_array_T = ~ppp_array; // @[TLB.scala:539:22, :593:26] wire [6:0] _must_alloc_array_T_1 = cmd_put_partial ? _must_alloc_array_T : 7'h0; // @[TLB.scala:573:41, :593:{8,26}] wire [6:0] _must_alloc_array_T_2 = ~pal_array; // @[TLB.scala:543:22, :594:26] wire [6:0] _must_alloc_array_T_3 = cmd_amo_logical ? _must_alloc_array_T_2 : 7'h0; // @[TLB.scala:571:40, :594:{8,26}] wire [6:0] _must_alloc_array_T_4 = _must_alloc_array_T_1 | _must_alloc_array_T_3; // @[TLB.scala:593:{8,43}, :594:8] wire [6:0] _must_alloc_array_T_5 = ~paa_array; // @[TLB.scala:541:22, :595:29] wire [6:0] _must_alloc_array_T_6 = cmd_amo_arithmetic ? 
_must_alloc_array_T_5 : 7'h0; // @[TLB.scala:572:43, :595:{8,29}] wire [6:0] _must_alloc_array_T_7 = _must_alloc_array_T_4 | _must_alloc_array_T_6; // @[TLB.scala:593:43, :594:43, :595:8] wire [6:0] _must_alloc_array_T_9 = {7{cmd_lrsc}}; // @[TLB.scala:570:33, :596:8] wire [6:0] must_alloc_array = _must_alloc_array_T_7 | _must_alloc_array_T_9; // @[TLB.scala:594:43, :595:46, :596:8] wire [6:0] _pf_ld_array_T_1 = ~_pf_ld_array_T; // @[TLB.scala:597:{37,41}] wire [6:0] _pf_ld_array_T_2 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73] wire [6:0] _pf_ld_array_T_3 = _pf_ld_array_T_1 & _pf_ld_array_T_2; // @[TLB.scala:597:{37,71,73}] wire [6:0] _pf_ld_array_T_4 = _pf_ld_array_T_3 | ptw_pf_array; // @[TLB.scala:508:25, :597:{71,88}] wire [6:0] _pf_ld_array_T_5 = ~ptw_gf_array; // @[TLB.scala:509:25, :597:106] wire [6:0] _pf_ld_array_T_6 = _pf_ld_array_T_4 & _pf_ld_array_T_5; // @[TLB.scala:597:{88,104,106}] wire [6:0] pf_ld_array = cmd_read ? _pf_ld_array_T_6 : 7'h0; // @[TLB.scala:597:{24,104}] wire [6:0] _pf_st_array_T = ~w_array; // @[TLB.scala:521:20, :598:44] wire [6:0] _pf_st_array_T_1 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :598:55] wire [6:0] _pf_st_array_T_2 = _pf_st_array_T & _pf_st_array_T_1; // @[TLB.scala:598:{44,53,55}] wire [6:0] _pf_st_array_T_3 = _pf_st_array_T_2 | ptw_pf_array; // @[TLB.scala:508:25, :598:{53,70}] wire [6:0] _pf_st_array_T_4 = ~ptw_gf_array; // @[TLB.scala:509:25, :597:106, :598:88] wire [6:0] _pf_st_array_T_5 = _pf_st_array_T_3 & _pf_st_array_T_4; // @[TLB.scala:598:{70,86,88}] wire [6:0] pf_st_array = cmd_write_perms ? _pf_st_array_T_5 : 7'h0; // @[TLB.scala:577:35, :598:{24,86}] wire [6:0] _pf_inst_array_T = ~x_array; // @[TLB.scala:522:20, :599:25] wire [6:0] _pf_inst_array_T_1 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :599:36] wire [6:0] _pf_inst_array_T_2 = _pf_inst_array_T & _pf_inst_array_T_1; // @[TLB.scala:599:{25,34,36}] wire [6:0] _pf_inst_array_T_3 = _pf_inst_array_T_2 | ptw_pf_array; // @[TLB.scala:508:25, :599:{34,51}] wire [6:0] _pf_inst_array_T_4 = ~ptw_gf_array; // @[TLB.scala:509:25, :597:106, :599:69] wire [6:0] pf_inst_array = _pf_inst_array_T_3 & _pf_inst_array_T_4; // @[TLB.scala:599:{51,67,69}] wire [6:0] _gf_ld_array_T_4 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :600:100] wire [6:0] _gf_ld_array_T_5 = _gf_ld_array_T_3 & _gf_ld_array_T_4; // @[TLB.scala:600:{82,98,100}] wire [6:0] _gf_st_array_T_3 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :601:81] wire [6:0] _gf_st_array_T_4 = _gf_st_array_T_2 & _gf_st_array_T_3; // @[TLB.scala:601:{63,79,81}] wire [6:0] _gf_inst_array_T_2 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :602:64] wire [6:0] _gf_inst_array_T_3 = _gf_inst_array_T_1 & _gf_inst_array_T_2; // @[TLB.scala:602:{46,62,64}] wire _gpa_hits_hit_mask_T = r_gpa_vpn == vpn; // @[TLB.scala:335:30, :364:22, :606:73] wire _gpa_hits_hit_mask_T_1 = r_gpa_valid & _gpa_hits_hit_mask_T; // @[TLB.scala:362:24, :606:{60,73}] wire [4:0] _gpa_hits_hit_mask_T_2 = {5{_gpa_hits_hit_mask_T_1}}; // @[TLB.scala:606:{24,60}] wire tlb_hit_if_not_gpa_miss = |real_hits; // @[package.scala:45:27] wire tlb_hit = |_tlb_hit_T; // @[TLB.scala:611:{28,40}] wire _tlb_miss_T_2 = ~bad_va; // @[TLB.scala:568:34, :613:56] wire _tlb_miss_T_3 = _tlb_miss_T_1 & _tlb_miss_T_2; // @[TLB.scala:613:{29,53,56}] wire _tlb_miss_T_4 = ~tlb_hit; // @[TLB.scala:611:40, :613:67] wire tlb_miss = _tlb_miss_T_3 & _tlb_miss_T_4; // @[TLB.scala:613:{53,64,67}] reg [2:0] state_vec_0; // @[Replacement.scala:305:17] reg [2:0] state_vec_1; // 
@[Replacement.scala:305:17] reg [2:0] state_vec_2; // @[Replacement.scala:305:17] reg [2:0] state_vec_3; // @[Replacement.scala:305:17] wire [1:0] _GEN_55 = {sector_hits_1, sector_hits_0}; // @[OneHot.scala:21:45] wire [1:0] lo; // @[OneHot.scala:21:45] assign lo = _GEN_55; // @[OneHot.scala:21:45] wire [1:0] r_sectored_hit_bits_lo; // @[OneHot.scala:21:45] assign r_sectored_hit_bits_lo = _GEN_55; // @[OneHot.scala:21:45] wire [1:0] lo_1 = lo; // @[OneHot.scala:21:45, :31:18] wire [1:0] _GEN_56 = {sector_hits_3, sector_hits_2}; // @[OneHot.scala:21:45] wire [1:0] hi; // @[OneHot.scala:21:45] assign hi = _GEN_56; // @[OneHot.scala:21:45] wire [1:0] r_sectored_hit_bits_hi; // @[OneHot.scala:21:45] assign r_sectored_hit_bits_hi = _GEN_56; // @[OneHot.scala:21:45] wire [1:0] hi_1 = hi; // @[OneHot.scala:21:45, :30:18] wire [1:0] state_vec_touch_way_sized = {|hi_1, hi_1[1] | lo_1[1]}; // @[OneHot.scala:30:18, :31:18, :32:{10,14,28}] wire _state_vec_set_left_older_T = state_vec_touch_way_sized[1]; // @[package.scala:163:13] wire state_vec_set_left_older = ~_state_vec_set_left_older_T; // @[Replacement.scala:196:{33,43}] wire [3:0][2:0] _GEN_57 = {{state_vec_3}, {state_vec_2}, {state_vec_1}, {state_vec_0}}; // @[package.scala:163:13] wire state_vec_left_subtree_state = _GEN_57[memIdx][1]; // @[package.scala:163:13] wire r_sectored_repl_addr_left_subtree_state = _GEN_57[memIdx][1]; // @[package.scala:163:13] wire state_vec_right_subtree_state = _GEN_57[memIdx][0]; // @[package.scala:163:13] wire r_sectored_repl_addr_right_subtree_state = _GEN_57[memIdx][0]; // @[package.scala:163:13] wire _state_vec_T = state_vec_touch_way_sized[0]; // @[package.scala:163:13] wire _state_vec_T_4 = state_vec_touch_way_sized[0]; // @[package.scala:163:13] wire _state_vec_T_1 = _state_vec_T; // @[package.scala:163:13] wire _state_vec_T_2 = ~_state_vec_T_1; // @[Replacement.scala:218:{7,17}] wire _state_vec_T_3 = state_vec_set_left_older ? state_vec_left_subtree_state : _state_vec_T_2; // @[package.scala:163:13] wire _state_vec_T_5 = _state_vec_T_4; // @[Replacement.scala:207:62, :218:17] wire _state_vec_T_6 = ~_state_vec_T_5; // @[Replacement.scala:218:{7,17}] wire _state_vec_T_7 = state_vec_set_left_older ? 
_state_vec_T_6 : state_vec_right_subtree_state; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7] wire [1:0] state_vec_hi = {state_vec_set_left_older, _state_vec_T_3}; // @[Replacement.scala:196:33, :202:12, :203:16] wire [2:0] _state_vec_T_8 = {state_vec_hi, _state_vec_T_7}; // @[Replacement.scala:202:12, :206:16] wire [2:0] _multipleHits_T = real_hits[2:0]; // @[package.scala:45:27] wire _multipleHits_T_1 = _multipleHits_T[0]; // @[Misc.scala:181:37] wire multipleHits_leftOne = _multipleHits_T_1; // @[Misc.scala:178:18, :181:37] wire [1:0] _multipleHits_T_2 = _multipleHits_T[2:1]; // @[Misc.scala:181:37, :182:39] wire _multipleHits_T_3 = _multipleHits_T_2[0]; // @[Misc.scala:181:37, :182:39] wire multipleHits_leftOne_1 = _multipleHits_T_3; // @[Misc.scala:178:18, :181:37] wire _multipleHits_T_4 = _multipleHits_T_2[1]; // @[Misc.scala:182:39] wire multipleHits_rightOne = _multipleHits_T_4; // @[Misc.scala:178:18, :182:39] wire multipleHits_rightOne_1 = multipleHits_leftOne_1 | multipleHits_rightOne; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_6 = multipleHits_leftOne_1 & multipleHits_rightOne; // @[Misc.scala:178:18, :183:61] wire multipleHits_rightTwo = _multipleHits_T_6; // @[Misc.scala:183:{49,61}] wire _multipleHits_T_7 = multipleHits_rightTwo; // @[Misc.scala:183:{37,49}] wire multipleHits_leftOne_2 = multipleHits_leftOne | multipleHits_rightOne_1; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_8 = multipleHits_leftOne & multipleHits_rightOne_1; // @[Misc.scala:178:18, :183:{16,61}] wire multipleHits_leftTwo = _multipleHits_T_7 | _multipleHits_T_8; // @[Misc.scala:183:{37,49,61}] wire [2:0] _multipleHits_T_9 = real_hits[5:3]; // @[package.scala:45:27] wire _multipleHits_T_10 = _multipleHits_T_9[0]; // @[Misc.scala:181:37, :182:39] wire multipleHits_leftOne_3 = _multipleHits_T_10; // @[Misc.scala:178:18, :181:37] wire [1:0] _multipleHits_T_11 = _multipleHits_T_9[2:1]; // @[Misc.scala:182:39] wire _multipleHits_T_12 = _multipleHits_T_11[0]; // @[Misc.scala:181:37, :182:39] wire multipleHits_leftOne_4 = _multipleHits_T_12; // @[Misc.scala:178:18, :181:37] wire _multipleHits_T_13 = _multipleHits_T_11[1]; // @[Misc.scala:182:39] wire multipleHits_rightOne_2 = _multipleHits_T_13; // @[Misc.scala:178:18, :182:39] wire multipleHits_rightOne_3 = multipleHits_leftOne_4 | multipleHits_rightOne_2; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_15 = multipleHits_leftOne_4 & multipleHits_rightOne_2; // @[Misc.scala:178:18, :183:61] wire multipleHits_rightTwo_1 = _multipleHits_T_15; // @[Misc.scala:183:{49,61}] wire _multipleHits_T_16 = multipleHits_rightTwo_1; // @[Misc.scala:183:{37,49}] wire multipleHits_rightOne_4 = multipleHits_leftOne_3 | multipleHits_rightOne_3; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_17 = multipleHits_leftOne_3 & multipleHits_rightOne_3; // @[Misc.scala:178:18, :183:{16,61}] wire multipleHits_rightTwo_2 = _multipleHits_T_16 | _multipleHits_T_17; // @[Misc.scala:183:{37,49,61}] wire _multipleHits_T_18 = multipleHits_leftOne_2 | multipleHits_rightOne_4; // @[Misc.scala:183:16] wire _multipleHits_T_19 = multipleHits_leftTwo | multipleHits_rightTwo_2; // @[Misc.scala:183:{37,49}] wire _multipleHits_T_20 = multipleHits_leftOne_2 & multipleHits_rightOne_4; // @[Misc.scala:183:{16,61}] wire multipleHits = _multipleHits_T_19 | _multipleHits_T_20; // @[Misc.scala:183:{37,49,61}] assign _io_req_ready_T = state == 2'h0; // @[TLB.scala:352:22, :631:25] assign io_req_ready_0 = _io_req_ready_T; // @[TLB.scala:318:7, :631:25] wire 
_io_resp_pf_ld_T = bad_va & cmd_read; // @[TLB.scala:568:34, :633:28] wire [6:0] _io_resp_pf_ld_T_1 = pf_ld_array & hits; // @[TLB.scala:442:17, :597:24, :633:57] wire _io_resp_pf_ld_T_2 = |_io_resp_pf_ld_T_1; // @[TLB.scala:633:{57,65}] assign _io_resp_pf_ld_T_3 = _io_resp_pf_ld_T | _io_resp_pf_ld_T_2; // @[TLB.scala:633:{28,41,65}] assign io_resp_pf_ld = _io_resp_pf_ld_T_3; // @[TLB.scala:318:7, :633:41] wire _io_resp_pf_st_T = bad_va & cmd_write_perms; // @[TLB.scala:568:34, :577:35, :634:28] wire [6:0] _io_resp_pf_st_T_1 = pf_st_array & hits; // @[TLB.scala:442:17, :598:24, :634:64] wire _io_resp_pf_st_T_2 = |_io_resp_pf_st_T_1; // @[TLB.scala:634:{64,72}] assign _io_resp_pf_st_T_3 = _io_resp_pf_st_T | _io_resp_pf_st_T_2; // @[TLB.scala:634:{28,48,72}] assign io_resp_pf_st = _io_resp_pf_st_T_3; // @[TLB.scala:318:7, :634:48] wire [6:0] _io_resp_pf_inst_T = pf_inst_array & hits; // @[TLB.scala:442:17, :599:67, :635:47] wire _io_resp_pf_inst_T_1 = |_io_resp_pf_inst_T; // @[TLB.scala:635:{47,55}] assign _io_resp_pf_inst_T_2 = bad_va | _io_resp_pf_inst_T_1; // @[TLB.scala:568:34, :635:{29,55}] assign io_resp_pf_inst = _io_resp_pf_inst_T_2; // @[TLB.scala:318:7, :635:29] wire [6:0] _io_resp_ae_ld_T = ae_ld_array & hits; // @[TLB.scala:442:17, :586:24, :641:33] assign _io_resp_ae_ld_T_1 = |_io_resp_ae_ld_T; // @[TLB.scala:641:{33,41}] assign io_resp_ae_ld = _io_resp_ae_ld_T_1; // @[TLB.scala:318:7, :641:41] wire [6:0] _io_resp_ae_st_T = ae_st_array & hits; // @[TLB.scala:442:17, :590:53, :642:33] assign _io_resp_ae_st_T_1 = |_io_resp_ae_st_T; // @[TLB.scala:642:{33,41}] assign io_resp_ae_st = _io_resp_ae_st_T_1; // @[TLB.scala:318:7, :642:41] wire [6:0] _io_resp_ae_inst_T = ~px_array; // @[TLB.scala:533:87, :643:23] wire [6:0] _io_resp_ae_inst_T_1 = _io_resp_ae_inst_T & hits; // @[TLB.scala:442:17, :643:{23,33}] assign _io_resp_ae_inst_T_2 = |_io_resp_ae_inst_T_1; // @[TLB.scala:643:{33,41}] assign io_resp_ae_inst = _io_resp_ae_inst_T_2; // @[TLB.scala:318:7, :643:41] assign _io_resp_ma_ld_T = misaligned & cmd_read; // @[TLB.scala:550:77, :645:31] assign io_resp_ma_ld = _io_resp_ma_ld_T; // @[TLB.scala:318:7, :645:31] assign _io_resp_ma_st_T = misaligned & cmd_write; // @[TLB.scala:550:77, :646:31] assign io_resp_ma_st = _io_resp_ma_st_T; // @[TLB.scala:318:7, :646:31] wire [6:0] _io_resp_cacheable_T = c_array & hits; // @[TLB.scala:442:17, :537:20, :648:33] assign _io_resp_cacheable_T_1 = |_io_resp_cacheable_T; // @[TLB.scala:648:{33,41}] assign io_resp_cacheable = _io_resp_cacheable_T_1; // @[TLB.scala:318:7, :648:41] wire [6:0] _io_resp_must_alloc_T = must_alloc_array & hits; // @[TLB.scala:442:17, :595:46, :649:43] assign _io_resp_must_alloc_T_1 = |_io_resp_must_alloc_T; // @[TLB.scala:649:{43,51}] assign io_resp_must_alloc = _io_resp_must_alloc_T_1; // @[TLB.scala:318:7, :649:51] wire [6:0] _io_resp_prefetchable_T = prefetchable_array & hits; // @[TLB.scala:442:17, :547:31, :650:47] wire _io_resp_prefetchable_T_1 = |_io_resp_prefetchable_T; // @[TLB.scala:650:{47,55}] assign _io_resp_prefetchable_T_2 = _io_resp_prefetchable_T_1; // @[TLB.scala:650:{55,59}] assign io_resp_prefetchable = _io_resp_prefetchable_T_2; // @[TLB.scala:318:7, :650:59] wire _io_resp_miss_T_1 = _io_resp_miss_T | tlb_miss; // @[TLB.scala:613:64, :651:{29,52}] assign _io_resp_miss_T_2 = _io_resp_miss_T_1 | multipleHits; // @[Misc.scala:183:49] assign io_resp_miss_0 = _io_resp_miss_T_2; // @[TLB.scala:318:7, :651:64] assign _io_resp_paddr_T_1 = {ppn, _io_resp_paddr_T}; // @[Mux.scala:30:73] assign io_resp_paddr_0 = 
_io_resp_paddr_T_1; // @[TLB.scala:318:7, :652:23] wire [27:0] _io_resp_gpa_page_T_1 = {1'h0, vpn}; // @[TLB.scala:335:30, :657:36] wire [27:0] io_resp_gpa_page = _io_resp_gpa_page_T_1; // @[TLB.scala:657:{19,36}] wire [26:0] _io_resp_gpa_page_T_2 = r_gpa[38:12]; // @[TLB.scala:363:18, :657:58] wire [11:0] _io_resp_gpa_offset_T = r_gpa[11:0]; // @[TLB.scala:363:18, :658:47] wire [11:0] io_resp_gpa_offset = _io_resp_gpa_offset_T_1; // @[TLB.scala:658:{21,82}] assign _io_resp_gpa_T = {io_resp_gpa_page, io_resp_gpa_offset}; // @[TLB.scala:657:19, :658:21, :659:8] assign io_resp_gpa = _io_resp_gpa_T; // @[TLB.scala:318:7, :659:8] assign io_ptw_req_valid_0 = _io_ptw_req_valid_T; // @[TLB.scala:318:7, :662:29] wire _r_superpage_repl_addr_T_1 = ~superpage_entries_0_valid_0; // @[TLB.scala:341:30, :757:43] wire _r_superpage_repl_addr_T_2 = _r_superpage_repl_addr_T_1; // @[OneHot.scala:48:45] wire r_sectored_repl_addr_left_subtree_older = _GEN_57[memIdx][2]; // @[package.scala:163:13] wire _r_sectored_repl_addr_T = r_sectored_repl_addr_left_subtree_state; // @[package.scala:163:13] wire _r_sectored_repl_addr_T_1 = r_sectored_repl_addr_right_subtree_state; // @[Replacement.scala:245:38, :262:12] wire _r_sectored_repl_addr_T_2 = r_sectored_repl_addr_left_subtree_older ? _r_sectored_repl_addr_T : _r_sectored_repl_addr_T_1; // @[Replacement.scala:243:38, :250:16, :262:12] wire [1:0] _r_sectored_repl_addr_T_3 = {r_sectored_repl_addr_left_subtree_older, _r_sectored_repl_addr_T_2}; // @[Replacement.scala:243:38, :249:12, :250:16] wire [1:0] r_sectored_repl_addr_valids_lo = {_GEN_11[memIdx], _GEN_7[memIdx]}; // @[package.scala:45:27, :163:13] wire [1:0] r_sectored_repl_addr_valids_hi = {_GEN_19[memIdx], _GEN_15[memIdx]}; // @[package.scala:45:27, :163:13] wire [3:0] r_sectored_repl_addr_valids = {r_sectored_repl_addr_valids_hi, r_sectored_repl_addr_valids_lo}; // @[package.scala:45:27] wire _r_sectored_repl_addr_T_4 = &r_sectored_repl_addr_valids; // @[package.scala:45:27] wire [3:0] _r_sectored_repl_addr_T_5 = ~r_sectored_repl_addr_valids; // @[package.scala:45:27] wire _r_sectored_repl_addr_T_6 = _r_sectored_repl_addr_T_5[0]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_7 = _r_sectored_repl_addr_T_5[1]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_8 = _r_sectored_repl_addr_T_5[2]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_9 = _r_sectored_repl_addr_T_5[3]; // @[OneHot.scala:48:45] wire [1:0] _r_sectored_repl_addr_T_10 = {1'h1, ~_r_sectored_repl_addr_T_8}; // @[OneHot.scala:48:45] wire [1:0] _r_sectored_repl_addr_T_11 = _r_sectored_repl_addr_T_7 ? 2'h1 : _r_sectored_repl_addr_T_10; // @[OneHot.scala:48:45] wire [1:0] _r_sectored_repl_addr_T_12 = _r_sectored_repl_addr_T_6 ? 2'h0 : _r_sectored_repl_addr_T_11; // @[OneHot.scala:48:45] wire [1:0] _r_sectored_repl_addr_T_13 = _r_sectored_repl_addr_T_4 ? 
_r_sectored_repl_addr_T_3 : _r_sectored_repl_addr_T_12; // @[Mux.scala:50:70] wire _r_sectored_hit_valid_T = sector_hits_0 | sector_hits_1; // @[package.scala:81:59] wire _r_sectored_hit_valid_T_1 = _r_sectored_hit_valid_T | sector_hits_2; // @[package.scala:81:59] wire _r_sectored_hit_valid_T_2 = _r_sectored_hit_valid_T_1 | sector_hits_3; // @[package.scala:81:59] wire [3:0] _r_sectored_hit_bits_T = {r_sectored_hit_bits_hi, r_sectored_hit_bits_lo}; // @[OneHot.scala:21:45] wire [1:0] r_sectored_hit_bits_hi_1 = _r_sectored_hit_bits_T[3:2]; // @[OneHot.scala:21:45, :30:18] wire [1:0] r_sectored_hit_bits_lo_1 = _r_sectored_hit_bits_T[1:0]; // @[OneHot.scala:21:45, :31:18] wire _r_sectored_hit_bits_T_1 = |r_sectored_hit_bits_hi_1; // @[OneHot.scala:30:18, :32:14] wire [1:0] _r_sectored_hit_bits_T_2 = r_sectored_hit_bits_hi_1 | r_sectored_hit_bits_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire _r_sectored_hit_bits_T_3 = _r_sectored_hit_bits_T_2[1]; // @[OneHot.scala:32:28] wire [1:0] _r_sectored_hit_bits_T_4 = {_r_sectored_hit_bits_T_1, _r_sectored_hit_bits_T_3}; // @[OneHot.scala:32:{10,14}] wire [1:0] _state_T = {1'h1, io_sfence_valid_0}; // @[TLB.scala:318:7, :704:45] wire _tagMatch_T = ~superpage_entries_0_tag_v; // @[TLB.scala:178:43, :341:30] wire tagMatch = superpage_entries_0_valid_0 & _tagMatch_T; // @[TLB.scala:178:{33,43}, :341:30] wire ignore_1 = _ignore_T_1; // @[TLB.scala:182:{28,34}] wire _ignore_T_2 = ~(superpage_entries_0_level[1]); // @[TLB.scala:182:28, :341:30] wire _tagMatch_T_1 = ~special_entry_tag_v; // @[TLB.scala:178:43, :346:56] wire tagMatch_1 = special_entry_valid_0 & _tagMatch_T_1; // @[TLB.scala:178:{33,43}, :346:56] wire ignore_4 = _ignore_T_4; // @[TLB.scala:182:{28,34}] wire _ignore_T_5 = ~(special_entry_level[1]); // @[TLB.scala:182:28, :197:28, :346:56] wire ignore_5 = _ignore_T_5; // @[TLB.scala:182:{28,34}] wire _T_12 = io_req_valid_0 & vm_enabled; // @[TLB.scala:318:7, :399:61, :617:22] wire _T_15 = sector_hits_0 | sector_hits_1 | sector_hits_2 | sector_hits_3; // @[package.scala:81:59] wire _GEN_58 = do_refill & ~io_ptw_resp_bits_homogeneous_0; // @[TLB.scala:211:18, :318:7, :346:56, :408:29, :446:20, :474:{39,70}] wire _GEN_59 = ~do_refill | ~io_ptw_resp_bits_homogeneous_0 | io_ptw_resp_bits_level_0[1]; // @[TLB.scala:318:7, :341:30, :408:29, :446:20, :474:70, :476:{40,58}] wire _T_4 = waddr_1 == 2'h0; // @[TLB.scala:485:22, :486:75] wire _GEN_60 = r_memIdx == 2'h0; // @[package.scala:163:13] wire _GEN_61 = r_memIdx == 2'h1; // @[package.scala:163:13] wire _GEN_62 = r_memIdx == 2'h2; // @[package.scala:163:13] wire _GEN_63 = ~io_ptw_resp_bits_homogeneous_0 | ~(io_ptw_resp_bits_level_0[1]); // @[TLB.scala:318:7, :339:29, :474:{39,70}, :476:{40,58}, :486:84] wire _GEN_64 = ~do_refill | _GEN_63 | ~(_T_4 & _GEN_60); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_65 = ~do_refill | _GEN_63 | ~(_T_4 & _GEN_61); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_66 = ~do_refill | _GEN_63 | ~(_T_4 & _GEN_62); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_67 = ~do_refill | _GEN_63 | ~(_T_4 & (&r_memIdx)); // @[package.scala:163:13] wire _GEN_68 = invalidate_refill & _GEN_60; // @[TLB.scala:216:16, :220:46, :410:88, :489:34] wire _GEN_69 = ~do_refill | _GEN_63 | ~_T_4; // @[TLB.scala:339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_70 = 
invalidate_refill & _GEN_61; // @[TLB.scala:216:16, :220:46, :410:88, :489:34] wire _GEN_71 = invalidate_refill & _GEN_62; // @[TLB.scala:216:16, :220:46, :410:88, :489:34] wire _GEN_72 = invalidate_refill & (&r_memIdx); // @[package.scala:163:13] wire _T_6 = waddr_1 == 2'h1; // @[TLB.scala:197:28, :485:22, :486:75] wire _GEN_73 = ~do_refill | _GEN_63 | ~(_T_6 & _GEN_60); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_74 = ~do_refill | _GEN_63 | ~(_T_6 & _GEN_61); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_75 = ~do_refill | _GEN_63 | ~(_T_6 & _GEN_62); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_76 = ~do_refill | _GEN_63 | ~(_T_6 & (&r_memIdx)); // @[package.scala:163:13] wire _GEN_77 = ~do_refill | _GEN_63 | ~_T_6; // @[TLB.scala:339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _T_8 = waddr_1 == 2'h2; // @[TLB.scala:485:22, :486:75] wire _GEN_78 = ~do_refill | _GEN_63 | ~(_T_8 & _GEN_60); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_79 = ~do_refill | _GEN_63 | ~(_T_8 & _GEN_61); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_80 = ~do_refill | _GEN_63 | ~(_T_8 & _GEN_62); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_81 = ~do_refill | _GEN_63 | ~(_T_8 & (&r_memIdx)); // @[package.scala:163:13] wire _GEN_82 = ~do_refill | _GEN_63 | ~_T_8; // @[TLB.scala:339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :486:{75,84}] wire _GEN_83 = ~do_refill | _GEN_63 | ~((&waddr_1) & _GEN_60); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :485:22, :486:{75,84}] wire _GEN_84 = ~do_refill | _GEN_63 | ~((&waddr_1) & _GEN_61); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :485:22, :486:{75,84}] wire _GEN_85 = ~do_refill | _GEN_63 | ~((&waddr_1) & _GEN_62); // @[TLB.scala:211:18, :220:46, :339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :485:22, :486:{75,84}] wire _GEN_86 = ~do_refill | _GEN_63 | ~((&waddr_1) & (&r_memIdx)); // @[package.scala:163:13] wire _GEN_87 = ~do_refill | _GEN_63 | ~(&waddr_1); // @[TLB.scala:339:29, :341:30, :408:29, :446:20, :474:70, :476:58, :485:22, :486:{75,84}] wire _T_2491 = io_ptw_req_ready_0 & io_ptw_req_valid_0; // @[Decoupled.scala:51:35] wire _T_24 = io_req_ready_0 & io_req_valid_0 & tlb_miss; // @[Decoupled.scala:51:35] wire _T_2490 = multipleHits | reset; // @[Misc.scala:183:49] always @(posedge clock) begin // @[TLB.scala:318:7] if (_GEN_64) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_0_0_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_0_0_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_0_0_tag_v <= _GEN_64 & sectored_entries_0_0_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_64) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_0_0_data_0 <= _sectored_entries_0_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_0_0_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_0_0_tag_v) & (_GEN_69 ? 
sectored_entries_0_0_valid_0 : ~_GEN_68 & (_GEN_60 | ~(~r_sectored_hit_valid & _GEN_60) & sectored_entries_0_0_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_73) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_0_1_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_0_1_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_0_1_tag_v <= _GEN_73 & sectored_entries_0_1_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_73) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_0_1_data_0 <= _sectored_entries_1_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_0_1_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_0_1_tag_v) & (_GEN_77 ? sectored_entries_0_1_valid_0 : ~_GEN_68 & (_GEN_60 | ~(~r_sectored_hit_valid & _GEN_60) & sectored_entries_0_1_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_78) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_0_2_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_0_2_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_0_2_tag_v <= _GEN_78 & sectored_entries_0_2_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_78) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_0_2_data_0 <= _sectored_entries_2_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_0_2_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_0_2_tag_v) & (_GEN_82 ? sectored_entries_0_2_valid_0 : ~_GEN_68 & (_GEN_60 | ~(~r_sectored_hit_valid & _GEN_60) & sectored_entries_0_2_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_83) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_0_3_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_0_3_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_0_3_tag_v <= _GEN_83 & sectored_entries_0_3_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_83) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_0_3_data_0 <= _sectored_entries_3_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_0_3_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_0_3_tag_v) & (_GEN_87 ? 
sectored_entries_0_3_valid_0 : ~_GEN_68 & (_GEN_60 | ~(~r_sectored_hit_valid & _GEN_60) & sectored_entries_0_3_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_65) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_1_0_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_1_0_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_1_0_tag_v <= _GEN_65 & sectored_entries_1_0_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_65) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_1_0_data_0 <= _sectored_entries_0_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_1_0_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_1_0_tag_v) & (_GEN_69 ? sectored_entries_1_0_valid_0 : ~_GEN_70 & (_GEN_61 | ~(~r_sectored_hit_valid & _GEN_61) & sectored_entries_1_0_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_74) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_1_1_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_1_1_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_1_1_tag_v <= _GEN_74 & sectored_entries_1_1_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_74) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_1_1_data_0 <= _sectored_entries_1_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_1_1_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_1_1_tag_v) & (_GEN_77 ? sectored_entries_1_1_valid_0 : ~_GEN_70 & (_GEN_61 | ~(~r_sectored_hit_valid & _GEN_61) & sectored_entries_1_1_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_79) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_1_2_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_1_2_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_1_2_tag_v <= _GEN_79 & sectored_entries_1_2_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_79) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_1_2_data_0 <= _sectored_entries_2_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_1_2_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_1_2_tag_v) & (_GEN_82 ? 
sectored_entries_1_2_valid_0 : ~_GEN_70 & (_GEN_61 | ~(~r_sectored_hit_valid & _GEN_61) & sectored_entries_1_2_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_84) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_1_3_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_1_3_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_1_3_tag_v <= _GEN_84 & sectored_entries_1_3_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_84) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_1_3_data_0 <= _sectored_entries_3_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_1_3_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_1_3_tag_v) & (_GEN_87 ? sectored_entries_1_3_valid_0 : ~_GEN_70 & (_GEN_61 | ~(~r_sectored_hit_valid & _GEN_61) & sectored_entries_1_3_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_66) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_2_0_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_2_0_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_2_0_tag_v <= _GEN_66 & sectored_entries_2_0_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_66) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_2_0_data_0 <= _sectored_entries_0_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_2_0_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_2_0_tag_v) & (_GEN_69 ? sectored_entries_2_0_valid_0 : ~_GEN_71 & (_GEN_62 | ~(~r_sectored_hit_valid & _GEN_62) & sectored_entries_2_0_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_75) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_2_1_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_2_1_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_2_1_tag_v <= _GEN_75 & sectored_entries_2_1_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_75) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_2_1_data_0 <= _sectored_entries_1_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_2_1_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_2_1_tag_v) & (_GEN_77 ? 
sectored_entries_2_1_valid_0 : ~_GEN_71 & (_GEN_62 | ~(~r_sectored_hit_valid & _GEN_62) & sectored_entries_2_1_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_80) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_2_2_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_2_2_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_2_2_tag_v <= _GEN_80 & sectored_entries_2_2_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_80) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_2_2_data_0 <= _sectored_entries_2_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_2_2_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_2_2_tag_v) & (_GEN_82 ? sectored_entries_2_2_valid_0 : ~_GEN_71 & (_GEN_62 | ~(~r_sectored_hit_valid & _GEN_62) & sectored_entries_2_2_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_85) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_2_3_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_2_3_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_2_3_tag_v <= _GEN_85 & sectored_entries_2_3_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_85) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_2_3_data_0 <= _sectored_entries_3_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_2_3_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_2_3_tag_v) & (_GEN_87 ? sectored_entries_2_3_valid_0 : ~_GEN_71 & (_GEN_62 | ~(~r_sectored_hit_valid & _GEN_62) & sectored_entries_2_3_valid_0)); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :339:29, :357:27, :446:20, :474:70, :476:58, :486:84, :487:{15,38}, :489:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_67) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_3_0_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_3_0_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_3_0_tag_v <= _GEN_67 & sectored_entries_3_0_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_67) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_3_0_data_0 <= _sectored_entries_0_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_3_0_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_3_0_tag_v) & (_GEN_69 ? 
sectored_entries_3_0_valid_0 : ~_GEN_72 & ((&r_memIdx) | ~(~r_sectored_hit_valid & (&r_memIdx)) & sectored_entries_3_0_valid_0)); // @[package.scala:163:13] if (_GEN_76) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_3_1_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_3_1_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_3_1_tag_v <= _GEN_76 & sectored_entries_3_1_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_76) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_3_1_data_0 <= _sectored_entries_1_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_3_1_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_3_1_tag_v) & (_GEN_77 ? sectored_entries_3_1_valid_0 : ~_GEN_72 & ((&r_memIdx) | ~(~r_sectored_hit_valid & (&r_memIdx)) & sectored_entries_3_1_valid_0)); // @[package.scala:163:13] if (_GEN_81) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_3_2_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_3_2_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_3_2_tag_v <= _GEN_81 & sectored_entries_3_2_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_81) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_3_2_data_0 <= _sectored_entries_2_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_3_2_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_3_2_tag_v) & (_GEN_82 ? sectored_entries_3_2_valid_0 : ~_GEN_72 & ((&r_memIdx) | ~(~r_sectored_hit_valid & (&r_memIdx)) & sectored_entries_3_2_valid_0)); // @[package.scala:163:13] if (_GEN_86) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_3_3_level <= 2'h0; // @[TLB.scala:339:29] sectored_entries_3_3_tag_vpn <= r_refill_tag; // @[TLB.scala:339:29, :354:25] end sectored_entries_3_3_tag_v <= _GEN_86 & sectored_entries_3_3_tag_v; // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] if (_GEN_86) begin // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] end else // @[TLB.scala:339:29, :446:20, :474:70, :476:58, :486:84] sectored_entries_3_3_data_0 <= _sectored_entries_3_data_0_T; // @[TLB.scala:217:24, :339:29] sectored_entries_3_3_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~sectored_entries_3_3_tag_v) & (_GEN_87 ? 
sectored_entries_3_3_valid_0 : ~_GEN_72 & ((&r_memIdx) | ~(~r_sectored_hit_valid & (&r_memIdx)) & sectored_entries_3_3_valid_0)); // @[package.scala:163:13] if (_GEN_59) begin // @[TLB.scala:341:30, :446:20, :474:70, :476:58] end else begin // @[TLB.scala:341:30, :446:20, :474:70, :476:58] superpage_entries_0_level <= {1'h0, _superpage_entries_0_level_T}; // @[package.scala:163:13] superpage_entries_0_tag_vpn <= r_refill_tag; // @[TLB.scala:341:30, :354:25] end superpage_entries_0_tag_v <= _GEN_59 & superpage_entries_0_tag_v; // @[TLB.scala:341:30, :446:20, :474:70, :476:58] if (_GEN_59) begin // @[TLB.scala:341:30, :446:20, :474:70, :476:58] end else // @[TLB.scala:341:30, :446:20, :474:70, :476:58] superpage_entries_0_data_0 <= _superpage_entries_0_data_0_T; // @[TLB.scala:217:24, :341:30] superpage_entries_0_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~superpage_entries_0_tag_v) & (_GEN_59 ? superpage_entries_0_valid_0 : ~invalidate_refill); // @[TLB.scala:216:16, :220:46, :223:{19,32,36}, :318:7, :341:30, :410:88, :446:20, :474:70, :476:58, :480:34, :718:19, :723:42, :728:46, :732:{24,41}] if (_GEN_58) begin // @[TLB.scala:211:18, :346:56, :446:20, :474:70] special_entry_level <= _special_entry_level_T; // @[package.scala:163:13] special_entry_tag_vpn <= r_refill_tag; // @[TLB.scala:346:56, :354:25] special_entry_data_0 <= _special_entry_data_0_T; // @[TLB.scala:217:24, :346:56] end special_entry_tag_v <= ~_GEN_58 & special_entry_tag_v; // @[TLB.scala:211:18, :212:16, :346:56, :446:20, :474:70] special_entry_valid_0 <= ~(_T_2490 | io_sfence_valid_0 & ~special_entry_tag_v) & (_GEN_58 | special_entry_valid_0); // @[TLB.scala:211:18, :216:16, :220:46, :223:{19,32,36}, :318:7, :346:56, :446:20, :474:70, :718:19, :723:42, :728:46, :732:{24,41}] if (_T_24) begin // @[Decoupled.scala:51:35] r_refill_tag <= vpn; // @[TLB.scala:335:30, :354:25] r_sectored_repl_addr <= _r_sectored_repl_addr_T_13; // @[TLB.scala:356:33, :757:8] r_sectored_hit_valid <= _r_sectored_hit_valid_T_2; // @[package.scala:81:59] r_sectored_hit_bits <= _r_sectored_hit_bits_T_4; // @[OneHot.scala:32:10] r_superpage_hit_valid <= superpage_hits_0; // @[TLB.scala:183:29, :358:28] r_need_gpa <= tlb_hit_if_not_gpa_miss; // @[TLB.scala:361:23, :610:43] end r_gpa_valid <= ~_T_2491 & (do_refill ? 
io_ptw_resp_bits_gpa_valid_0 : r_gpa_valid); // @[Decoupled.scala:51:35] if (do_refill) begin // @[TLB.scala:408:29] r_gpa <= io_ptw_resp_bits_gpa_bits_0; // @[TLB.scala:318:7, :363:18] r_gpa_is_pte <= io_ptw_resp_bits_gpa_is_pte_0; // @[TLB.scala:318:7, :365:25] end if (_T_2491) // @[Decoupled.scala:51:35] r_gpa_vpn <= r_refill_tag; // @[TLB.scala:354:25, :364:22] if (reset) begin // @[TLB.scala:318:7] state <= 2'h0; // @[TLB.scala:352:22] state_vec_0 <= 3'h0; // @[Replacement.scala:305:17] state_vec_1 <= 3'h0; // @[Replacement.scala:305:17] state_vec_2 <= 3'h0; // @[Replacement.scala:305:17] state_vec_3 <= 3'h0; // @[Replacement.scala:305:17] end else begin // @[TLB.scala:318:7] if (io_ptw_resp_valid_0) // @[TLB.scala:318:7] state <= 2'h0; // @[TLB.scala:352:22] else if (state == 2'h2 & io_sfence_valid_0) // @[TLB.scala:318:7, :352:22, :709:{17,28}] state <= 2'h3; // @[TLB.scala:352:22] else if (_T_25) begin // @[package.scala:16:47] if (io_ptw_req_ready_0) // @[TLB.scala:318:7] state <= _state_T; // @[TLB.scala:352:22, :704:45] else if (io_sfence_valid_0) // @[TLB.scala:318:7] state <= 2'h0; // @[TLB.scala:352:22] else if (_T_24) // @[Decoupled.scala:51:35] state <= 2'h1; // @[TLB.scala:197:28, :352:22] end else if (_T_24) // @[Decoupled.scala:51:35] state <= 2'h1; // @[TLB.scala:197:28, :352:22] if (_T_12 & _T_15 & memIdx == 2'h0) // @[package.scala:81:59, :163:13] state_vec_0 <= _state_vec_T_8; // @[Replacement.scala:202:12, :305:17] if (_T_12 & _T_15 & memIdx == 2'h1) // @[package.scala:81:59, :163:13] state_vec_1 <= _state_vec_T_8; // @[Replacement.scala:202:12, :305:17] if (_T_12 & _T_15 & memIdx == 2'h2) // @[package.scala:81:59, :163:13] state_vec_2 <= _state_vec_T_8; // @[Replacement.scala:202:12, :305:17] if (_T_12 & _T_15 & (&memIdx)) // @[package.scala:81:59, :163:13] state_vec_3 <= _state_vec_T_8; // @[Replacement.scala:202:12, :305:17] end always @(posedge) OptimizationBarrier_TLBEntryData_56 mpu_ppn_barrier ( // @[package.scala:267:25] .clock (clock), .reset (reset), .io_x_ppn (_mpu_ppn_WIRE_ppn), // @[TLB.scala:170:77] .io_x_u (_mpu_ppn_WIRE_u), // @[TLB.scala:170:77] .io_x_g (_mpu_ppn_WIRE_g), // @[TLB.scala:170:77] .io_x_ae_ptw (_mpu_ppn_WIRE_ae_ptw), // @[TLB.scala:170:77] .io_x_ae_final (_mpu_ppn_WIRE_ae_final), // @[TLB.scala:170:77] .io_x_ae_stage2 (_mpu_ppn_WIRE_ae_stage2), // @[TLB.scala:170:77] .io_x_pf (_mpu_ppn_WIRE_pf), // @[TLB.scala:170:77] .io_x_gf (_mpu_ppn_WIRE_gf), // @[TLB.scala:170:77] .io_x_sw (_mpu_ppn_WIRE_sw), // @[TLB.scala:170:77] .io_x_sx (_mpu_ppn_WIRE_sx), // @[TLB.scala:170:77] .io_x_sr (_mpu_ppn_WIRE_sr), // @[TLB.scala:170:77] .io_x_hw (_mpu_ppn_WIRE_hw), // @[TLB.scala:170:77] .io_x_hx (_mpu_ppn_WIRE_hx), // @[TLB.scala:170:77] .io_x_hr (_mpu_ppn_WIRE_hr), // @[TLB.scala:170:77] .io_x_pw (_mpu_ppn_WIRE_pw), // @[TLB.scala:170:77] .io_x_px (_mpu_ppn_WIRE_px), // @[TLB.scala:170:77] .io_x_pr (_mpu_ppn_WIRE_pr), // @[TLB.scala:170:77] .io_x_ppp (_mpu_ppn_WIRE_ppp), // @[TLB.scala:170:77] .io_x_pal (_mpu_ppn_WIRE_pal), // @[TLB.scala:170:77] .io_x_paa (_mpu_ppn_WIRE_paa), // @[TLB.scala:170:77] .io_x_eff (_mpu_ppn_WIRE_eff), // @[TLB.scala:170:77] .io_x_c (_mpu_ppn_WIRE_c), // @[TLB.scala:170:77] .io_x_fragmented_superpage (_mpu_ppn_WIRE_fragmented_superpage), // @[TLB.scala:170:77] .io_y_ppn (_mpu_ppn_barrier_io_y_ppn) ); // @[package.scala:267:25] PMPChecker_s3_6 pmp ( // @[TLB.scala:416:19] .clock (clock), .reset (reset), .io_prv (mpu_priv[1:0]), // @[TLB.scala:415:27, :420:14] .io_pmp_0_cfg_l (io_ptw_pmp_0_cfg_l_0), // @[TLB.scala:318:7] 
.io_pmp_0_cfg_a (io_ptw_pmp_0_cfg_a_0), // @[TLB.scala:318:7] .io_pmp_0_cfg_x (io_ptw_pmp_0_cfg_x_0), // @[TLB.scala:318:7] .io_pmp_0_cfg_w (io_ptw_pmp_0_cfg_w_0), // @[TLB.scala:318:7] .io_pmp_0_cfg_r (io_ptw_pmp_0_cfg_r_0), // @[TLB.scala:318:7] .io_pmp_0_addr (io_ptw_pmp_0_addr_0), // @[TLB.scala:318:7] .io_pmp_0_mask (io_ptw_pmp_0_mask_0), // @[TLB.scala:318:7] .io_pmp_1_cfg_l (io_ptw_pmp_1_cfg_l_0), // @[TLB.scala:318:7] .io_pmp_1_cfg_a (io_ptw_pmp_1_cfg_a_0), // @[TLB.scala:318:7] .io_pmp_1_cfg_x (io_ptw_pmp_1_cfg_x_0), // @[TLB.scala:318:7] .io_pmp_1_cfg_w (io_ptw_pmp_1_cfg_w_0), // @[TLB.scala:318:7] .io_pmp_1_cfg_r (io_ptw_pmp_1_cfg_r_0), // @[TLB.scala:318:7] .io_pmp_1_addr (io_ptw_pmp_1_addr_0), // @[TLB.scala:318:7] .io_pmp_1_mask (io_ptw_pmp_1_mask_0), // @[TLB.scala:318:7] .io_pmp_2_cfg_l (io_ptw_pmp_2_cfg_l_0), // @[TLB.scala:318:7] .io_pmp_2_cfg_a (io_ptw_pmp_2_cfg_a_0), // @[TLB.scala:318:7] .io_pmp_2_cfg_x (io_ptw_pmp_2_cfg_x_0), // @[TLB.scala:318:7] .io_pmp_2_cfg_w (io_ptw_pmp_2_cfg_w_0), // @[TLB.scala:318:7] .io_pmp_2_cfg_r (io_ptw_pmp_2_cfg_r_0), // @[TLB.scala:318:7] .io_pmp_2_addr (io_ptw_pmp_2_addr_0), // @[TLB.scala:318:7] .io_pmp_2_mask (io_ptw_pmp_2_mask_0), // @[TLB.scala:318:7] .io_pmp_3_cfg_l (io_ptw_pmp_3_cfg_l_0), // @[TLB.scala:318:7] .io_pmp_3_cfg_a (io_ptw_pmp_3_cfg_a_0), // @[TLB.scala:318:7] .io_pmp_3_cfg_x (io_ptw_pmp_3_cfg_x_0), // @[TLB.scala:318:7] .io_pmp_3_cfg_w (io_ptw_pmp_3_cfg_w_0), // @[TLB.scala:318:7] .io_pmp_3_cfg_r (io_ptw_pmp_3_cfg_r_0), // @[TLB.scala:318:7] .io_pmp_3_addr (io_ptw_pmp_3_addr_0), // @[TLB.scala:318:7] .io_pmp_3_mask (io_ptw_pmp_3_mask_0), // @[TLB.scala:318:7] .io_pmp_4_cfg_l (io_ptw_pmp_4_cfg_l_0), // @[TLB.scala:318:7] .io_pmp_4_cfg_a (io_ptw_pmp_4_cfg_a_0), // @[TLB.scala:318:7] .io_pmp_4_cfg_x (io_ptw_pmp_4_cfg_x_0), // @[TLB.scala:318:7] .io_pmp_4_cfg_w (io_ptw_pmp_4_cfg_w_0), // @[TLB.scala:318:7] .io_pmp_4_cfg_r (io_ptw_pmp_4_cfg_r_0), // @[TLB.scala:318:7] .io_pmp_4_addr (io_ptw_pmp_4_addr_0), // @[TLB.scala:318:7] .io_pmp_4_mask (io_ptw_pmp_4_mask_0), // @[TLB.scala:318:7] .io_pmp_5_cfg_l (io_ptw_pmp_5_cfg_l_0), // @[TLB.scala:318:7] .io_pmp_5_cfg_a (io_ptw_pmp_5_cfg_a_0), // @[TLB.scala:318:7] .io_pmp_5_cfg_x (io_ptw_pmp_5_cfg_x_0), // @[TLB.scala:318:7] .io_pmp_5_cfg_w (io_ptw_pmp_5_cfg_w_0), // @[TLB.scala:318:7] .io_pmp_5_cfg_r (io_ptw_pmp_5_cfg_r_0), // @[TLB.scala:318:7] .io_pmp_5_addr (io_ptw_pmp_5_addr_0), // @[TLB.scala:318:7] .io_pmp_5_mask (io_ptw_pmp_5_mask_0), // @[TLB.scala:318:7] .io_pmp_6_cfg_l (io_ptw_pmp_6_cfg_l_0), // @[TLB.scala:318:7] .io_pmp_6_cfg_a (io_ptw_pmp_6_cfg_a_0), // @[TLB.scala:318:7] .io_pmp_6_cfg_x (io_ptw_pmp_6_cfg_x_0), // @[TLB.scala:318:7] .io_pmp_6_cfg_w (io_ptw_pmp_6_cfg_w_0), // @[TLB.scala:318:7] .io_pmp_6_cfg_r (io_ptw_pmp_6_cfg_r_0), // @[TLB.scala:318:7] .io_pmp_6_addr (io_ptw_pmp_6_addr_0), // @[TLB.scala:318:7] .io_pmp_6_mask (io_ptw_pmp_6_mask_0), // @[TLB.scala:318:7] .io_pmp_7_cfg_l (io_ptw_pmp_7_cfg_l_0), // @[TLB.scala:318:7] .io_pmp_7_cfg_a (io_ptw_pmp_7_cfg_a_0), // @[TLB.scala:318:7] .io_pmp_7_cfg_x (io_ptw_pmp_7_cfg_x_0), // @[TLB.scala:318:7] .io_pmp_7_cfg_w (io_ptw_pmp_7_cfg_w_0), // @[TLB.scala:318:7] .io_pmp_7_cfg_r (io_ptw_pmp_7_cfg_r_0), // @[TLB.scala:318:7] .io_pmp_7_addr (io_ptw_pmp_7_addr_0), // @[TLB.scala:318:7] .io_pmp_7_mask (io_ptw_pmp_7_mask_0), // @[TLB.scala:318:7] .io_addr (mpu_physaddr[31:0]), // @[TLB.scala:414:25, :417:15] .io_size (io_req_bits_size_0), // @[TLB.scala:318:7] .io_r (_pmp_io_r), .io_w (_pmp_io_w), .io_x (_pmp_io_x) ); 
// @[TLB.scala:416:19] PMAChecker_6 pma ( // @[TLB.scala:422:19] .clock (clock), .reset (reset), .io_paddr (mpu_physaddr), // @[TLB.scala:414:25] .io_resp_cacheable (cacheable), .io_resp_r (_pma_io_resp_r), .io_resp_w (_pma_io_resp_w), .io_resp_pp (_pma_io_resp_pp), .io_resp_al (_pma_io_resp_al), .io_resp_aa (_pma_io_resp_aa), .io_resp_x (_pma_io_resp_x), .io_resp_eff (_pma_io_resp_eff) ); // @[TLB.scala:422:19] assign newEntry_ppp = _pma_io_resp_pp; // @[TLB.scala:422:19, :449:24] assign newEntry_pal = _pma_io_resp_al; // @[TLB.scala:422:19, :449:24] assign newEntry_paa = _pma_io_resp_aa; // @[TLB.scala:422:19, :449:24] assign newEntry_eff = _pma_io_resp_eff; // @[TLB.scala:422:19, :449:24] OptimizationBarrier_TLBEntryData_57 entries_barrier ( // @[package.scala:267:25] .clock (clock), .reset (reset), .io_x_ppn (_entries_WIRE_ppn), // @[TLB.scala:170:77] .io_x_u (_entries_WIRE_u), // @[TLB.scala:170:77] .io_x_g (_entries_WIRE_g), // @[TLB.scala:170:77] .io_x_ae_ptw (_entries_WIRE_ae_ptw), // @[TLB.scala:170:77] .io_x_ae_final (_entries_WIRE_ae_final), // @[TLB.scala:170:77] .io_x_ae_stage2 (_entries_WIRE_ae_stage2), // @[TLB.scala:170:77] .io_x_pf (_entries_WIRE_pf), // @[TLB.scala:170:77] .io_x_gf (_entries_WIRE_gf), // @[TLB.scala:170:77] .io_x_sw (_entries_WIRE_sw), // @[TLB.scala:170:77] .io_x_sx (_entries_WIRE_sx), // @[TLB.scala:170:77] .io_x_sr (_entries_WIRE_sr), // @[TLB.scala:170:77] .io_x_hw (_entries_WIRE_hw), // @[TLB.scala:170:77] .io_x_hx (_entries_WIRE_hx), // @[TLB.scala:170:77] .io_x_hr (_entries_WIRE_hr), // @[TLB.scala:170:77] .io_x_pw (_entries_WIRE_pw), // @[TLB.scala:170:77] .io_x_px (_entries_WIRE_px), // @[TLB.scala:170:77] .io_x_pr (_entries_WIRE_pr), // @[TLB.scala:170:77] .io_x_ppp (_entries_WIRE_ppp), // @[TLB.scala:170:77] .io_x_pal (_entries_WIRE_pal), // @[TLB.scala:170:77] .io_x_paa (_entries_WIRE_paa), // @[TLB.scala:170:77] .io_x_eff (_entries_WIRE_eff), // @[TLB.scala:170:77] .io_x_c (_entries_WIRE_c), // @[TLB.scala:170:77] .io_x_fragmented_superpage (_entries_WIRE_fragmented_superpage), // @[TLB.scala:170:77] .io_y_ppn (_entries_barrier_io_y_ppn), .io_y_u (_entries_barrier_io_y_u), .io_y_ae_ptw (_entries_barrier_io_y_ae_ptw), .io_y_ae_final (_entries_barrier_io_y_ae_final), .io_y_ae_stage2 (_entries_barrier_io_y_ae_stage2), .io_y_pf (_entries_barrier_io_y_pf), .io_y_gf (_entries_barrier_io_y_gf), .io_y_sw (_entries_barrier_io_y_sw), .io_y_sx (_entries_barrier_io_y_sx), .io_y_sr (_entries_barrier_io_y_sr), .io_y_hw (_entries_barrier_io_y_hw), .io_y_hx (_entries_barrier_io_y_hx), .io_y_hr (_entries_barrier_io_y_hr), .io_y_pw (_entries_barrier_io_y_pw), .io_y_px (_entries_barrier_io_y_px), .io_y_pr (_entries_barrier_io_y_pr), .io_y_ppp (_entries_barrier_io_y_ppp), .io_y_pal (_entries_barrier_io_y_pal), .io_y_paa (_entries_barrier_io_y_paa), .io_y_eff (_entries_barrier_io_y_eff), .io_y_c (_entries_barrier_io_y_c) ); // @[package.scala:267:25] OptimizationBarrier_TLBEntryData_58 entries_barrier_1 ( // @[package.scala:267:25] .clock (clock), .reset (reset), .io_x_ppn (_entries_WIRE_2_ppn), // @[TLB.scala:170:77] .io_x_u (_entries_WIRE_2_u), // @[TLB.scala:170:77] .io_x_g (_entries_WIRE_2_g), // @[TLB.scala:170:77] .io_x_ae_ptw (_entries_WIRE_2_ae_ptw), // @[TLB.scala:170:77] .io_x_ae_final (_entries_WIRE_2_ae_final), // @[TLB.scala:170:77] .io_x_ae_stage2 (_entries_WIRE_2_ae_stage2), // @[TLB.scala:170:77] .io_x_pf (_entries_WIRE_2_pf), // @[TLB.scala:170:77] .io_x_gf (_entries_WIRE_2_gf), // @[TLB.scala:170:77] .io_x_sw (_entries_WIRE_2_sw), // 
@[TLB.scala:170:77] .io_x_sx (_entries_WIRE_2_sx), // @[TLB.scala:170:77] .io_x_sr (_entries_WIRE_2_sr), // @[TLB.scala:170:77] .io_x_hw (_entries_WIRE_2_hw), // @[TLB.scala:170:77] .io_x_hx (_entries_WIRE_2_hx), // @[TLB.scala:170:77] .io_x_hr (_entries_WIRE_2_hr), // @[TLB.scala:170:77] .io_x_pw (_entries_WIRE_2_pw), // @[TLB.scala:170:77] .io_x_px (_entries_WIRE_2_px), // @[TLB.scala:170:77] .io_x_pr (_entries_WIRE_2_pr), // @[TLB.scala:170:77] .io_x_ppp (_entries_WIRE_2_ppp), // @[TLB.scala:170:77] .io_x_pal (_entries_WIRE_2_pal), // @[TLB.scala:170:77] .io_x_paa (_entries_WIRE_2_paa), // @[TLB.scala:170:77] .io_x_eff (_entries_WIRE_2_eff), // @[TLB.scala:170:77] .io_x_c (_entries_WIRE_2_c), // @[TLB.scala:170:77] .io_x_fragmented_superpage (_entries_WIRE_2_fragmented_superpage), // @[TLB.scala:170:77] .io_y_ppn (_entries_barrier_1_io_y_ppn), .io_y_u (_entries_barrier_1_io_y_u), .io_y_ae_ptw (_entries_barrier_1_io_y_ae_ptw), .io_y_ae_final (_entries_barrier_1_io_y_ae_final), .io_y_ae_stage2 (_entries_barrier_1_io_y_ae_stage2), .io_y_pf (_entries_barrier_1_io_y_pf), .io_y_gf (_entries_barrier_1_io_y_gf), .io_y_sw (_entries_barrier_1_io_y_sw), .io_y_sx (_entries_barrier_1_io_y_sx), .io_y_sr (_entries_barrier_1_io_y_sr), .io_y_hw (_entries_barrier_1_io_y_hw), .io_y_hx (_entries_barrier_1_io_y_hx), .io_y_hr (_entries_barrier_1_io_y_hr), .io_y_pw (_entries_barrier_1_io_y_pw), .io_y_px (_entries_barrier_1_io_y_px), .io_y_pr (_entries_barrier_1_io_y_pr), .io_y_ppp (_entries_barrier_1_io_y_ppp), .io_y_pal (_entries_barrier_1_io_y_pal), .io_y_paa (_entries_barrier_1_io_y_paa), .io_y_eff (_entries_barrier_1_io_y_eff), .io_y_c (_entries_barrier_1_io_y_c) ); // @[package.scala:267:25] OptimizationBarrier_TLBEntryData_59 entries_barrier_2 ( // @[package.scala:267:25] .clock (clock), .reset (reset), .io_x_ppn (_entries_WIRE_4_ppn), // @[TLB.scala:170:77] .io_x_u (_entries_WIRE_4_u), // @[TLB.scala:170:77] .io_x_g (_entries_WIRE_4_g), // @[TLB.scala:170:77] .io_x_ae_ptw (_entries_WIRE_4_ae_ptw), // @[TLB.scala:170:77] .io_x_ae_final (_entries_WIRE_4_ae_final), // @[TLB.scala:170:77] .io_x_ae_stage2 (_entries_WIRE_4_ae_stage2), // @[TLB.scala:170:77] .io_x_pf (_entries_WIRE_4_pf), // @[TLB.scala:170:77] .io_x_gf (_entries_WIRE_4_gf), // @[TLB.scala:170:77] .io_x_sw (_entries_WIRE_4_sw), // @[TLB.scala:170:77] .io_x_sx (_entries_WIRE_4_sx), // @[TLB.scala:170:77] .io_x_sr (_entries_WIRE_4_sr), // @[TLB.scala:170:77] .io_x_hw (_entries_WIRE_4_hw), // @[TLB.scala:170:77] .io_x_hx (_entries_WIRE_4_hx), // @[TLB.scala:170:77] .io_x_hr (_entries_WIRE_4_hr), // @[TLB.scala:170:77] .io_x_pw (_entries_WIRE_4_pw), // @[TLB.scala:170:77] .io_x_px (_entries_WIRE_4_px), // @[TLB.scala:170:77] .io_x_pr (_entries_WIRE_4_pr), // @[TLB.scala:170:77] .io_x_ppp (_entries_WIRE_4_ppp), // @[TLB.scala:170:77] .io_x_pal (_entries_WIRE_4_pal), // @[TLB.scala:170:77] .io_x_paa (_entries_WIRE_4_paa), // @[TLB.scala:170:77] .io_x_eff (_entries_WIRE_4_eff), // @[TLB.scala:170:77] .io_x_c (_entries_WIRE_4_c), // @[TLB.scala:170:77] .io_x_fragmented_superpage (_entries_WIRE_4_fragmented_superpage), // @[TLB.scala:170:77] .io_y_ppn (_entries_barrier_2_io_y_ppn), .io_y_u (_entries_barrier_2_io_y_u), .io_y_ae_ptw (_entries_barrier_2_io_y_ae_ptw), .io_y_ae_final (_entries_barrier_2_io_y_ae_final), .io_y_ae_stage2 (_entries_barrier_2_io_y_ae_stage2), .io_y_pf (_entries_barrier_2_io_y_pf), .io_y_gf (_entries_barrier_2_io_y_gf), .io_y_sw (_entries_barrier_2_io_y_sw), .io_y_sx (_entries_barrier_2_io_y_sx), .io_y_sr 
(_entries_barrier_2_io_y_sr), .io_y_hw (_entries_barrier_2_io_y_hw), .io_y_hx (_entries_barrier_2_io_y_hx), .io_y_hr (_entries_barrier_2_io_y_hr), .io_y_pw (_entries_barrier_2_io_y_pw), .io_y_px (_entries_barrier_2_io_y_px), .io_y_pr (_entries_barrier_2_io_y_pr), .io_y_ppp (_entries_barrier_2_io_y_ppp), .io_y_pal (_entries_barrier_2_io_y_pal), .io_y_paa (_entries_barrier_2_io_y_paa), .io_y_eff (_entries_barrier_2_io_y_eff), .io_y_c (_entries_barrier_2_io_y_c) ); // @[package.scala:267:25] OptimizationBarrier_TLBEntryData_60 entries_barrier_3 ( // @[package.scala:267:25] .clock (clock), .reset (reset), .io_x_ppn (_entries_WIRE_6_ppn), // @[TLB.scala:170:77] .io_x_u (_entries_WIRE_6_u), // @[TLB.scala:170:77] .io_x_g (_entries_WIRE_6_g), // @[TLB.scala:170:77] .io_x_ae_ptw (_entries_WIRE_6_ae_ptw), // @[TLB.scala:170:77] .io_x_ae_final (_entries_WIRE_6_ae_final), // @[TLB.scala:170:77] .io_x_ae_stage2 (_entries_WIRE_6_ae_stage2), // @[TLB.scala:170:77] .io_x_pf (_entries_WIRE_6_pf), // @[TLB.scala:170:77] .io_x_gf (_entries_WIRE_6_gf), // @[TLB.scala:170:77] .io_x_sw (_entries_WIRE_6_sw), // @[TLB.scala:170:77] .io_x_sx (_entries_WIRE_6_sx), // @[TLB.scala:170:77] .io_x_sr (_entries_WIRE_6_sr), // @[TLB.scala:170:77] .io_x_hw (_entries_WIRE_6_hw), // @[TLB.scala:170:77] .io_x_hx (_entries_WIRE_6_hx), // @[TLB.scala:170:77] .io_x_hr (_entries_WIRE_6_hr), // @[TLB.scala:170:77] .io_x_pw (_entries_WIRE_6_pw), // @[TLB.scala:170:77] .io_x_px (_entries_WIRE_6_px), // @[TLB.scala:170:77] .io_x_pr (_entries_WIRE_6_pr), // @[TLB.scala:170:77] .io_x_ppp (_entries_WIRE_6_ppp), // @[TLB.scala:170:77] .io_x_pal (_entries_WIRE_6_pal), // @[TLB.scala:170:77] .io_x_paa (_entries_WIRE_6_paa), // @[TLB.scala:170:77] .io_x_eff (_entries_WIRE_6_eff), // @[TLB.scala:170:77] .io_x_c (_entries_WIRE_6_c), // @[TLB.scala:170:77] .io_x_fragmented_superpage (_entries_WIRE_6_fragmented_superpage), // @[TLB.scala:170:77] .io_y_ppn (_entries_barrier_3_io_y_ppn), .io_y_u (_entries_barrier_3_io_y_u), .io_y_ae_ptw (_entries_barrier_3_io_y_ae_ptw), .io_y_ae_final (_entries_barrier_3_io_y_ae_final), .io_y_ae_stage2 (_entries_barrier_3_io_y_ae_stage2), .io_y_pf (_entries_barrier_3_io_y_pf), .io_y_gf (_entries_barrier_3_io_y_gf), .io_y_sw (_entries_barrier_3_io_y_sw), .io_y_sx (_entries_barrier_3_io_y_sx), .io_y_sr (_entries_barrier_3_io_y_sr), .io_y_hw (_entries_barrier_3_io_y_hw), .io_y_hx (_entries_barrier_3_io_y_hx), .io_y_hr (_entries_barrier_3_io_y_hr), .io_y_pw (_entries_barrier_3_io_y_pw), .io_y_px (_entries_barrier_3_io_y_px), .io_y_pr (_entries_barrier_3_io_y_pr), .io_y_ppp (_entries_barrier_3_io_y_ppp), .io_y_pal (_entries_barrier_3_io_y_pal), .io_y_paa (_entries_barrier_3_io_y_paa), .io_y_eff (_entries_barrier_3_io_y_eff), .io_y_c (_entries_barrier_3_io_y_c) ); // @[package.scala:267:25] OptimizationBarrier_TLBEntryData_61 entries_barrier_4 ( // @[package.scala:267:25] .clock (clock), .reset (reset), .io_x_ppn (_entries_WIRE_8_ppn), // @[TLB.scala:170:77] .io_x_u (_entries_WIRE_8_u), // @[TLB.scala:170:77] .io_x_g (_entries_WIRE_8_g), // @[TLB.scala:170:77] .io_x_ae_ptw (_entries_WIRE_8_ae_ptw), // @[TLB.scala:170:77] .io_x_ae_final (_entries_WIRE_8_ae_final), // @[TLB.scala:170:77] .io_x_ae_stage2 (_entries_WIRE_8_ae_stage2), // @[TLB.scala:170:77] .io_x_pf (_entries_WIRE_8_pf), // @[TLB.scala:170:77] .io_x_gf (_entries_WIRE_8_gf), // @[TLB.scala:170:77] .io_x_sw (_entries_WIRE_8_sw), // @[TLB.scala:170:77] .io_x_sx (_entries_WIRE_8_sx), // @[TLB.scala:170:77] .io_x_sr (_entries_WIRE_8_sr), // @[TLB.scala:170:77] 
.io_x_hw (_entries_WIRE_8_hw), // @[TLB.scala:170:77] .io_x_hx (_entries_WIRE_8_hx), // @[TLB.scala:170:77] .io_x_hr (_entries_WIRE_8_hr), // @[TLB.scala:170:77] .io_x_pw (_entries_WIRE_8_pw), // @[TLB.scala:170:77] .io_x_px (_entries_WIRE_8_px), // @[TLB.scala:170:77] .io_x_pr (_entries_WIRE_8_pr), // @[TLB.scala:170:77] .io_x_ppp (_entries_WIRE_8_ppp), // @[TLB.scala:170:77] .io_x_pal (_entries_WIRE_8_pal), // @[TLB.scala:170:77] .io_x_paa (_entries_WIRE_8_paa), // @[TLB.scala:170:77] .io_x_eff (_entries_WIRE_8_eff), // @[TLB.scala:170:77] .io_x_c (_entries_WIRE_8_c), // @[TLB.scala:170:77] .io_x_fragmented_superpage (_entries_WIRE_8_fragmented_superpage), // @[TLB.scala:170:77] .io_y_ppn (_entries_barrier_4_io_y_ppn), .io_y_u (_entries_barrier_4_io_y_u), .io_y_ae_ptw (_entries_barrier_4_io_y_ae_ptw), .io_y_ae_final (_entries_barrier_4_io_y_ae_final), .io_y_ae_stage2 (_entries_barrier_4_io_y_ae_stage2), .io_y_pf (_entries_barrier_4_io_y_pf), .io_y_gf (_entries_barrier_4_io_y_gf), .io_y_sw (_entries_barrier_4_io_y_sw), .io_y_sx (_entries_barrier_4_io_y_sx), .io_y_sr (_entries_barrier_4_io_y_sr), .io_y_hw (_entries_barrier_4_io_y_hw), .io_y_hx (_entries_barrier_4_io_y_hx), .io_y_hr (_entries_barrier_4_io_y_hr), .io_y_pw (_entries_barrier_4_io_y_pw), .io_y_px (_entries_barrier_4_io_y_px), .io_y_pr (_entries_barrier_4_io_y_pr), .io_y_ppp (_entries_barrier_4_io_y_ppp), .io_y_pal (_entries_barrier_4_io_y_pal), .io_y_paa (_entries_barrier_4_io_y_paa), .io_y_eff (_entries_barrier_4_io_y_eff), .io_y_c (_entries_barrier_4_io_y_c) ); // @[package.scala:267:25] OptimizationBarrier_TLBEntryData_62 entries_barrier_5 ( // @[package.scala:267:25] .clock (clock), .reset (reset), .io_x_ppn (_entries_WIRE_10_ppn), // @[TLB.scala:170:77] .io_x_u (_entries_WIRE_10_u), // @[TLB.scala:170:77] .io_x_g (_entries_WIRE_10_g), // @[TLB.scala:170:77] .io_x_ae_ptw (_entries_WIRE_10_ae_ptw), // @[TLB.scala:170:77] .io_x_ae_final (_entries_WIRE_10_ae_final), // @[TLB.scala:170:77] .io_x_ae_stage2 (_entries_WIRE_10_ae_stage2), // @[TLB.scala:170:77] .io_x_pf (_entries_WIRE_10_pf), // @[TLB.scala:170:77] .io_x_gf (_entries_WIRE_10_gf), // @[TLB.scala:170:77] .io_x_sw (_entries_WIRE_10_sw), // @[TLB.scala:170:77] .io_x_sx (_entries_WIRE_10_sx), // @[TLB.scala:170:77] .io_x_sr (_entries_WIRE_10_sr), // @[TLB.scala:170:77] .io_x_hw (_entries_WIRE_10_hw), // @[TLB.scala:170:77] .io_x_hx (_entries_WIRE_10_hx), // @[TLB.scala:170:77] .io_x_hr (_entries_WIRE_10_hr), // @[TLB.scala:170:77] .io_x_pw (_entries_WIRE_10_pw), // @[TLB.scala:170:77] .io_x_px (_entries_WIRE_10_px), // @[TLB.scala:170:77] .io_x_pr (_entries_WIRE_10_pr), // @[TLB.scala:170:77] .io_x_ppp (_entries_WIRE_10_ppp), // @[TLB.scala:170:77] .io_x_pal (_entries_WIRE_10_pal), // @[TLB.scala:170:77] .io_x_paa (_entries_WIRE_10_paa), // @[TLB.scala:170:77] .io_x_eff (_entries_WIRE_10_eff), // @[TLB.scala:170:77] .io_x_c (_entries_WIRE_10_c), // @[TLB.scala:170:77] .io_x_fragmented_superpage (_entries_WIRE_10_fragmented_superpage), // @[TLB.scala:170:77] .io_y_ppn (_entries_barrier_5_io_y_ppn), .io_y_u (_entries_barrier_5_io_y_u), .io_y_ae_ptw (_entries_barrier_5_io_y_ae_ptw), .io_y_ae_final (_entries_barrier_5_io_y_ae_final), .io_y_ae_stage2 (_entries_barrier_5_io_y_ae_stage2), .io_y_pf (_entries_barrier_5_io_y_pf), .io_y_gf (_entries_barrier_5_io_y_gf), .io_y_sw (_entries_barrier_5_io_y_sw), .io_y_sx (_entries_barrier_5_io_y_sx), .io_y_sr (_entries_barrier_5_io_y_sr), .io_y_hw (_entries_barrier_5_io_y_hw), .io_y_hx (_entries_barrier_5_io_y_hx), .io_y_hr 
(_entries_barrier_5_io_y_hr) ); // @[package.scala:267:25] assign io_req_ready = io_req_ready_0; // @[TLB.scala:318:7] assign io_resp_miss = io_resp_miss_0; // @[TLB.scala:318:7] assign io_resp_paddr = io_resp_paddr_0; // @[TLB.scala:318:7] assign io_ptw_req_valid = io_ptw_req_valid_0; // @[TLB.scala:318:7] assign io_ptw_req_bits_bits_addr = io_ptw_req_bits_bits_addr_0; // @[TLB.scala:318:7] assign io_ptw_req_bits_bits_need_gpa = io_ptw_req_bits_bits_need_gpa_0; // @[TLB.scala:318:7] endmodule
Generate the Verilog code corresponding to the following Chisel files.
File Nodes.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.tilelink

import chisel3._
import chisel3.experimental.SourceInfo

import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import org.chipsalliance.diplomacy.nodes._

import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection}

case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args))

object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle]
{
  def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo)
  def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo)

  def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle)
  def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle)

  def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString)

  override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = {
    val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge)))
    monitor.io.in := bundle
  }

  override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters =
    pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })
  override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters =
    pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })
}

trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut]

case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode
case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode
case class TLAdapterNode(
  clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s },
  managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLJunctionNode(
  clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters],
  managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])(
  implicit valName: ValName)
  extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode
case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode

object TLNameNode {
  def apply(name: ValName) = TLIdentityNode()(name)
  def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLIdentityNode = apply(Some(name))
}

case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)()

object TLTempNode {
  def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp"))
}

case class TLNexusNode(
  clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters,
  managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)(
  implicit valName: ValName)
  extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode

abstract class TLCustomNode(implicit valName: ValName)
  extends CustomNode(TLImp) with TLFormatNode

// Asynchronous crossings

trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters]

object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle]
{
  def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo)
  def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle)
  def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString)

  override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters =
    pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
  override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters =
    pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}

case class TLAsyncAdapterNode(
  clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s },
  managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode

case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode

object TLAsyncNameNode {
  def apply(name: ValName) = TLAsyncIdentityNode()(name)
  def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLAsyncIdentityNode = apply(Some(name))
}

case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLAsyncImp)(
    dFn = { p => TLAsyncClientPortParameters(p) },
    uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain

case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName)
  extends MixedAdapterNode(TLAsyncImp, TLImp)(
    dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) },
    uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut]

// Rationally related crossings

trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters]

object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle]
{
  def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo)
  def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle)
  def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */)

  override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters =
    pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
  override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters =
    pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}

case class TLRationalAdapterNode(
  clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s },
  managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode

case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode

object TLRationalNameNode {
  def apply(name: ValName) = TLRationalIdentityNode()(name)
  def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLRationalIdentityNode = apply(Some(name))
}

case class TLRationalSourceNode()(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLRationalImp)(
    dFn = { p => TLRationalClientPortParameters(p) },
    uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain

case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName)
  extends MixedAdapterNode(TLRationalImp, TLImp)(
    dFn = { p => p.base.v1copy(minLatency = 1) },
    uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut]

// Credited version of TileLink channels

trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters]

object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle]
{
  def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo)
  def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle)
  def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString)

  override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters =
    pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }))
  override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters =
    pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }))
}

case class TLCreditedAdapterNode(
  clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s },
  managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })(
  implicit valName: ValName)
  extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode

case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode

object TLCreditedNameNode {
  def apply(name: ValName) = TLCreditedIdentityNode()(name)
  def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name")))
  def apply(name: String): TLCreditedIdentityNode = apply(Some(name))
}

case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName)
  extends MixedAdapterNode(TLImp, TLCreditedImp)(
dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] // discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async } File AsyncCrossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet, NodeHandle} import freechips.rocketchip.prci.{AsynchronousCrossing} import freechips.rocketchip.subsystem.CrossingWrapper import freechips.rocketchip.util.{AsyncQueueParams, ToAsyncBundle, FromAsyncBundle, Pow2ClockDivider, property} class TLAsyncCrossingSource(sync: Option[Int])(implicit p: Parameters) extends LazyModule { def this(x: Int)(implicit p: Parameters) = this(Some(x)) def this()(implicit p: Parameters) = this(None) val node = TLAsyncSourceNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = (Seq("TLAsyncCrossingSource") ++ node.in.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => val bce = edgeIn.manager.anySupportAcquireB && edgeIn.client.anySupportProbe val psync = sync.getOrElse(edgeOut.manager.async.sync) val params = edgeOut.manager.async.copy(sync = psync) out.a <> ToAsyncBundle(in.a, params) in.d <> FromAsyncBundle(out.d, psync) property.cover(in.a, "TL_ASYNC_CROSSING_SOURCE_A", "MemorySystem;;TLAsyncCrossingSource Channel A") property.cover(in.d, "TL_ASYNC_CROSSING_SOURCE_D", "MemorySystem;;TLAsyncCrossingSource Channel D") if (bce) { in.b <> FromAsyncBundle(out.b, psync) out.c <> ToAsyncBundle(in.c, params) out.e <> ToAsyncBundle(in.e, params) property.cover(in.b, "TL_ASYNC_CROSSING_SOURCE_B", "MemorySystem;;TLAsyncCrossingSource Channel B") property.cover(in.c, "TL_ASYNC_CROSSING_SOURCE_C", "MemorySystem;;TLAsyncCrossingSource Channel C") property.cover(in.e, "TL_ASYNC_CROSSING_SOURCE_E", "MemorySystem;;TLAsyncCrossingSource Channel E") } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ridx := 0.U out.c.widx := 0.U out.e.widx := 0.U } } } } class TLAsyncCrossingSink(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) extends LazyModule { val node = TLAsyncSinkNode(params) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = (Seq("TLAsyncCrossingSink") ++ 
node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => val bce = edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe out.a <> FromAsyncBundle(in.a, params.sync) in.d <> ToAsyncBundle(out.d, params) property.cover(out.a, "TL_ASYNC_CROSSING_SINK_A", "MemorySystem;;TLAsyncCrossingSink Channel A") property.cover(out.d, "TL_ASYNC_CROSSING_SINK_D", "MemorySystem;;TLAsyncCrossingSink Channel D") if (bce) { in.b <> ToAsyncBundle(out.b, params) out.c <> FromAsyncBundle(in.c, params.sync) out.e <> FromAsyncBundle(in.e, params.sync) property.cover(out.b, "TL_ASYNC_CROSSING_SINK_B", "MemorySystem;;TLAsyncCrossingSinkChannel B") property.cover(out.c, "TL_ASYNC_CROSSING_SINK_C", "MemorySystem;;TLAsyncCrossingSink Channel C") property.cover(out.e, "TL_ASYNC_CROSSING_SINK_E", "MemorySystem;;TLAsyncCrossingSink Channel E") } else { in.b.widx := 0.U in.c.ridx := 0.U in.e.ridx := 0.U out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLAsyncCrossingSource { def apply()(implicit p: Parameters): TLAsyncSourceNode = apply(None) def apply(sync: Int)(implicit p: Parameters): TLAsyncSourceNode = apply(Some(sync)) def apply(sync: Option[Int])(implicit p: Parameters): TLAsyncSourceNode = { val asource = LazyModule(new TLAsyncCrossingSource(sync)) asource.node } } object TLAsyncCrossingSink { def apply(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) = { val asink = LazyModule(new TLAsyncCrossingSink(params)) asink.node } } @deprecated("TLAsyncCrossing is fragile. Use TLAsyncCrossingSource and TLAsyncCrossingSink", "rocket-chip 1.2") class TLAsyncCrossing(params: AsyncQueueParams = AsyncQueueParams())(implicit p: Parameters) extends LazyModule { val source = LazyModule(new TLAsyncCrossingSource()) val sink = LazyModule(new TLAsyncCrossingSink(params)) val node = NodeHandle(source.node, sink.node) sink.node := source.node lazy val module = new Impl class Impl extends LazyModuleImp(this) { val io = IO(new Bundle { val in_clock = Input(Clock()) val in_reset = Input(Bool()) val out_clock = Input(Clock()) val out_reset = Input(Bool()) }) source.module.clock := io.in_clock source.module.reset := io.in_reset sink.module.clock := io.out_clock sink.module.reset := io.out_reset } } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMAsyncCrossing(txns: Int, params: AsynchronousCrossing = AsynchronousCrossing())(implicit p: Parameters) extends LazyModule { val model = LazyModule(new TLRAMModel("AsyncCrossing")) val fuzz = LazyModule(new TLFuzzer(txns)) val island = LazyModule(new CrossingWrapper(params)) val ram = island { LazyModule(new TLRAM(AddressSet(0x0, 0x3ff))) } island.crossTLIn(ram.node) := TLFragmenter(4, 256) := TLDelayer(0.1) := model.node := fuzz.node lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished // Shove the RAM into another clock domain val clocks = Module(new Pow2ClockDivider(2)) island.module.clock := clocks.io.clock_out } } class TLRAMAsyncCrossingTest(txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut_wide = Module(LazyModule(new TLRAMAsyncCrossing(txns)).module) val dut_narrow = Module(LazyModule(new TLRAMAsyncCrossing(txns, AsynchronousCrossing(safe = false, narrow = true))).module) io.finished := dut_wide.io.finished && dut_narrow.io.finished dut_wide.io.start := io.start dut_narrow.io.start := io.start } 
File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. 
val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } }
module TLAsyncCrossingSource_a9d32s1k1z2u( // @[AsyncCrossing.scala:23:9] input clock, // @[AsyncCrossing.scala:23:9] input reset, // @[AsyncCrossing.scala:23:9] output auto_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [8:0] auto_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [31:0] auto_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_a_mem_0_opcode, // @[LazyModuleImp.scala:107:25] output [8:0] auto_out_a_mem_0_address, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_a_mem_0_data, // @[LazyModuleImp.scala:107:25] input auto_out_a_ridx, // @[LazyModuleImp.scala:107:25] output auto_out_a_widx, // @[LazyModuleImp.scala:107:25] input auto_out_a_safe_ridx_valid, // @[LazyModuleImp.scala:107:25] output auto_out_a_safe_widx_valid, // @[LazyModuleImp.scala:107:25] output auto_out_a_safe_source_reset_n, // @[LazyModuleImp.scala:107:25] input auto_out_a_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25] input [2:0] auto_out_d_mem_0_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_d_mem_0_size, // @[LazyModuleImp.scala:107:25] input auto_out_d_mem_0_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_out_d_mem_0_data, // @[LazyModuleImp.scala:107:25] output auto_out_d_ridx, // @[LazyModuleImp.scala:107:25] input auto_out_d_widx, // @[LazyModuleImp.scala:107:25] output auto_out_d_safe_ridx_valid, // @[LazyModuleImp.scala:107:25] input auto_out_d_safe_widx_valid, // @[LazyModuleImp.scala:107:25] input auto_out_d_safe_source_reset_n, // @[LazyModuleImp.scala:107:25] output auto_out_d_safe_sink_reset_n // @[LazyModuleImp.scala:107:25] ); wire _nodeIn_d_sink_io_deq_valid; // @[AsyncQueue.scala:211:22] wire [2:0] _nodeIn_d_sink_io_deq_bits_opcode; // @[AsyncQueue.scala:211:22] wire [1:0] _nodeIn_d_sink_io_deq_bits_param; // @[AsyncQueue.scala:211:22] wire [1:0] _nodeIn_d_sink_io_deq_bits_size; // @[AsyncQueue.scala:211:22] wire _nodeIn_d_sink_io_deq_bits_source; // @[AsyncQueue.scala:211:22] wire _nodeIn_d_sink_io_deq_bits_sink; // @[AsyncQueue.scala:211:22] wire _nodeIn_d_sink_io_deq_bits_denied; // @[AsyncQueue.scala:211:22] wire _nodeIn_d_sink_io_deq_bits_corrupt; // @[AsyncQueue.scala:211:22] wire _nodeOut_a_source_io_enq_ready; // @[AsyncQueue.scala:220:24] TLMonitor_48 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (_nodeOut_a_source_io_enq_ready), // @[AsyncQueue.scala:220:24] .io_in_a_valid (auto_in_a_valid), .io_in_a_bits_opcode (auto_in_a_bits_opcode), .io_in_a_bits_address (auto_in_a_bits_address), .io_in_d_ready (auto_in_d_ready), .io_in_d_valid (_nodeIn_d_sink_io_deq_valid), // @[AsyncQueue.scala:211:22] .io_in_d_bits_opcode (_nodeIn_d_sink_io_deq_bits_opcode), // @[AsyncQueue.scala:211:22] .io_in_d_bits_param (_nodeIn_d_sink_io_deq_bits_param), 
// @[AsyncQueue.scala:211:22] .io_in_d_bits_size (_nodeIn_d_sink_io_deq_bits_size), // @[AsyncQueue.scala:211:22] .io_in_d_bits_source (_nodeIn_d_sink_io_deq_bits_source), // @[AsyncQueue.scala:211:22] .io_in_d_bits_sink (_nodeIn_d_sink_io_deq_bits_sink), // @[AsyncQueue.scala:211:22] .io_in_d_bits_denied (_nodeIn_d_sink_io_deq_bits_denied), // @[AsyncQueue.scala:211:22] .io_in_d_bits_corrupt (_nodeIn_d_sink_io_deq_bits_corrupt) // @[AsyncQueue.scala:211:22] ); // @[Nodes.scala:27:25] AsyncQueueSource_TLBundleA_a9d32s1k1z2u nodeOut_a_source ( // @[AsyncQueue.scala:220:24] .clock (clock), .reset (reset), .io_enq_ready (_nodeOut_a_source_io_enq_ready), .io_enq_valid (auto_in_a_valid), .io_enq_bits_opcode (auto_in_a_bits_opcode), .io_enq_bits_address (auto_in_a_bits_address), .io_enq_bits_data (auto_in_a_bits_data), .io_async_mem_0_opcode (auto_out_a_mem_0_opcode), .io_async_mem_0_address (auto_out_a_mem_0_address), .io_async_mem_0_data (auto_out_a_mem_0_data), .io_async_ridx (auto_out_a_ridx), .io_async_widx (auto_out_a_widx), .io_async_safe_ridx_valid (auto_out_a_safe_ridx_valid), .io_async_safe_widx_valid (auto_out_a_safe_widx_valid), .io_async_safe_source_reset_n (auto_out_a_safe_source_reset_n), .io_async_safe_sink_reset_n (auto_out_a_safe_sink_reset_n) ); // @[AsyncQueue.scala:220:24] AsyncQueueSink_TLBundleD_a9d32s1k1z2u nodeIn_d_sink ( // @[AsyncQueue.scala:211:22] .clock (clock), .reset (reset), .io_deq_ready (auto_in_d_ready), .io_deq_valid (_nodeIn_d_sink_io_deq_valid), .io_deq_bits_opcode (_nodeIn_d_sink_io_deq_bits_opcode), .io_deq_bits_param (_nodeIn_d_sink_io_deq_bits_param), .io_deq_bits_size (_nodeIn_d_sink_io_deq_bits_size), .io_deq_bits_source (_nodeIn_d_sink_io_deq_bits_source), .io_deq_bits_sink (_nodeIn_d_sink_io_deq_bits_sink), .io_deq_bits_denied (_nodeIn_d_sink_io_deq_bits_denied), .io_deq_bits_data (auto_in_d_bits_data), .io_deq_bits_corrupt (_nodeIn_d_sink_io_deq_bits_corrupt), .io_async_mem_0_opcode (auto_out_d_mem_0_opcode), .io_async_mem_0_size (auto_out_d_mem_0_size), .io_async_mem_0_source (auto_out_d_mem_0_source), .io_async_mem_0_data (auto_out_d_mem_0_data), .io_async_ridx (auto_out_d_ridx), .io_async_widx (auto_out_d_widx), .io_async_safe_ridx_valid (auto_out_d_safe_ridx_valid), .io_async_safe_widx_valid (auto_out_d_safe_widx_valid), .io_async_safe_source_reset_n (auto_out_d_safe_source_reset_n), .io_async_safe_sink_reset_n (auto_out_d_safe_sink_reset_n) ); // @[AsyncQueue.scala:211:22] assign auto_in_a_ready = _nodeOut_a_source_io_enq_ready; // @[AsyncQueue.scala:220:24] assign auto_in_d_valid = _nodeIn_d_sink_io_deq_valid; // @[AsyncQueue.scala:211:22] assign auto_in_d_bits_opcode = _nodeIn_d_sink_io_deq_bits_opcode; // @[AsyncQueue.scala:211:22] assign auto_in_d_bits_param = _nodeIn_d_sink_io_deq_bits_param; // @[AsyncQueue.scala:211:22] assign auto_in_d_bits_size = _nodeIn_d_sink_io_deq_bits_size; // @[AsyncQueue.scala:211:22] assign auto_in_d_bits_source = _nodeIn_d_sink_io_deq_bits_source; // @[AsyncQueue.scala:211:22] assign auto_in_d_bits_sink = _nodeIn_d_sink_io_deq_bits_sink; // @[AsyncQueue.scala:211:22] assign auto_in_d_bits_denied = _nodeIn_d_sink_io_deq_bits_denied; // @[AsyncQueue.scala:211:22] assign auto_in_d_bits_corrupt = _nodeIn_d_sink_io_deq_bits_corrupt; // @[AsyncQueue.scala:211:22] endmodule
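For context (not part of the generated output above), a minimal diplomacy sketch of how the TLAsyncCrossingSource/TLAsyncCrossingSink node factories from AsyncCrossing.scala are typically chained. The module name AsyncCrossingExample and the fuzzer traffic generator are hypothetical, and in a real design the sink side would sit in a separate clock domain (e.g. inside a CrossingWrapper), as in TLRAMAsyncCrossing.

// Hypothetical example, assuming the rocket-chip TLRAM/TLFuzzer/TLFragmenter helpers
// referenced in AsyncCrossing.scala are on the classpath.
import org.chipsalliance.cde.config.Parameters
import org.chipsalliance.diplomacy.lazymodule._
import freechips.rocketchip.diplomacy.AddressSet
import freechips.rocketchip.tilelink._
import freechips.rocketchip.util.AsyncQueueParams

class AsyncCrossingExample(implicit p: Parameters) extends LazyModule {
  val fuzz = LazyModule(new TLFuzzer(1000))                 // TL client generating traffic
  val ram  = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff)))  // TL manager behind the crossing

  // sink := source elaborates the AsyncQueueSource/AsyncQueueSink pair whose Verilog
  // is shown above; here both ends share one clock for brevity, whereas a real design
  // would place the sink and ram in another clock domain, as in TLRAMAsyncCrossing.
  ram.node := TLAsyncCrossingSink(AsyncQueueParams(depth = 8, sync = 3)) :=
    TLAsyncCrossingSource(3) := TLFragmenter(4, 256) := fuzz.node

  lazy val module = new LazyModuleImp(this) { }
}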
Generate the Verilog code corresponding to the following Chisel files. File DescribedSRAM.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3.{Data, SyncReadMem, Vec} import chisel3.util.log2Ceil object DescribedSRAM { def apply[T <: Data]( name: String, desc: String, size: BigInt, // depth data: T ): SyncReadMem[T] = { val mem = SyncReadMem(size, data) mem.suggestName(name) val granWidth = data match { case v: Vec[_] => v.head.getWidth case d => d.getWidth } val uid = 0 Annotated.srams( component = mem, name = name, address_width = log2Ceil(size), data_width = data.getWidth, depth = size, description = desc, write_mask_granularity = granWidth ) mem } }
module array_2_0_0( // @[DescribedSRAM.scala:17:26]
  input  [7:0]   R0_addr,
  input          R0_en,
  input          R0_clk,
  output [127:0] R0_data,
  input  [7:0]   W0_addr,
  input          W0_en,
  input          W0_clk,
  input  [127:0] W0_data,
  input  [1:0]   W0_mask
);

  array_0_0_ext array_0_0_ext ( // @[DescribedSRAM.scala:17:26]
    .R0_addr (R0_addr),
    .R0_en   (R0_en),
    .R0_clk  (R0_clk),
    .R0_data (R0_data),
    .W0_addr (W0_addr),
    .W0_en   (W0_en),
    .W0_clk  (W0_clk),
    .W0_data (W0_data),
    .W0_mask (W0_mask)
  ); // @[DescribedSRAM.scala:17:26]
endmodule
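For context, a minimal usage sketch (hypothetical module and port names) of the DescribedSRAM helper listed above; a Vec(2, UInt(64.W)) data type yields the two-lane write mask that appears as W0_mask in the generated Verilog.

// Hypothetical wrapper; DescribedSRAM.apply is the only API taken from the file above.
import chisel3._
import freechips.rocketchip.util.DescribedSRAM

class DescribedSramExample extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(8.W))
    val ren   = Input(Bool())
    val rdata = Output(Vec(2, UInt(64.W)))
    val waddr = Input(UInt(8.W))
    val wen   = Input(Bool())
    val wdata = Input(Vec(2, UInt(64.W)))
    val wmask = Input(Vec(2, Bool()))
  })

  // 256-entry, 128-bit wide memory with 64-bit write-mask granularity
  // (the two mask bits correspond to W0_mask[1:0] in the Verilog above).
  val mem = DescribedSRAM(
    name = "array_example",
    desc = "example data array",
    size = 256,
    data = Vec(2, UInt(64.W)))

  io.rdata := mem.read(io.raddr, io.ren)
  when (io.wen) { mem.write(io.waddr, io.wdata, io.wmask) }
}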
Generate the Verilog code corresponding to the following Chisel files. File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. * SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. 
class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends 
AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module ClockCrossingReg_w79( // @[SynchronizerReg.scala:191:7]
  input         clock, // @[SynchronizerReg.scala:191:7]
  input         reset, // @[SynchronizerReg.scala:191:7]
  input  [78:0] io_d, // @[SynchronizerReg.scala:195:14]
  output [78:0] io_q, // @[SynchronizerReg.scala:195:14]
  input         io_en // @[SynchronizerReg.scala:195:14]
);

  wire [78:0] io_d_0 = io_d; // @[SynchronizerReg.scala:191:7]
  wire        io_en_0 = io_en; // @[SynchronizerReg.scala:191:7]
  wire [78:0] io_q_0; // @[SynchronizerReg.scala:191:7]
  reg  [78:0] cdc_reg; // @[SynchronizerReg.scala:201:76]
  assign io_q_0 = cdc_reg; // @[SynchronizerReg.scala:191:7, :201:76]
  always @(posedge clock) begin // @[SynchronizerReg.scala:191:7]
    if (io_en_0) // @[SynchronizerReg.scala:191:7]
      cdc_reg <= io_d_0; // @[SynchronizerReg.scala:191:7, :201:76]
  end // @[SynchronizerReg.scala:191:7]
  assign io_q = io_q_0; // @[SynchronizerReg.scala:191:7]
endmodule
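For context, a minimal sketch (hypothetical module and register names) of how the ClockCrossingReg companion object from SynchronizerReg.scala is invoked; with doInit = false it produces an enable-gated register with no reset value, matching ClockCrossingReg_w79 above.

// Hypothetical usage; ClockCrossingReg.apply comes from SynchronizerReg.scala above.
import chisel3._
import freechips.rocketchip.util.ClockCrossingReg

class CrossingRegExample extends Module {
  val io = IO(new Bundle {
    val d  = Input(UInt(79.W))
    val en = Input(Bool())
    val q  = Output(UInt(79.W))
  })
  // doInit = false: an enable-gated register with no reset, as in ClockCrossingReg_w79.
  io.q := ClockCrossingReg(io.d, en = io.en, doInit = false, name = Some("cdc_example_reg"))
}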
Generate the Verilog code corresponding to the following Chisel files. File Transposer.scala: package gemmini import chisel3._ import chisel3.util._ import Util._ trait Transposer[T <: Data] extends Module { def dim: Int def dataType: T val io = IO(new Bundle { val inRow = Flipped(Decoupled(Vec(dim, dataType))) val outCol = Decoupled(Vec(dim, dataType)) }) } class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose val sMoveUp :: sMoveLeft :: Nil = Enum(2) val state = RegInit(sMoveUp) val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1) val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1) io.outCol.valid := 0.U io.inRow.ready := 0.U switch(state) { is(sMoveUp) { io.inRow.ready := upCounter <= dim.U io.outCol.valid := leftCounter > 0.U when(io.inRow.fire) { upCounter := upCounter + 1.U } when(upCounter === (dim-1).U) { state := sMoveLeft leftCounter := 0.U } when(io.outCol.fire) { leftCounter := leftCounter - 1.U } } is(sMoveLeft) { io.inRow.ready := leftCounter <= dim.U // TODO: this is naive io.outCol.valid := upCounter > 0.U when(leftCounter === (dim-1).U) { state := sMoveUp } when(io.inRow.fire) { leftCounter := leftCounter + 1.U upCounter := 0.U } when(io.outCol.fire) { upCounter := upCounter - 1.U } } } // Propagate input from bottom row to top row systolically in the move up phase // TODO: need to iterate over columns to connect Chisel values of type T // Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?) for (colIdx <- 0 until dim) { regArray.foldRight(io.inRow.bits(colIdx)) { case (regRow, prevReg) => when (state === sMoveUp) { regRow(colIdx) := prevReg } regRow(colIdx) } } // Propagate input from right side to left side systolically in the move left phase for (rowIdx <- 0 until dim) { regArrayT.foldRight(io.inRow.bits(rowIdx)) { case (regCol, prevReg) => when (state === sMoveLeft) { regCol(rowIdx) := prevReg } regCol(rowIdx) } } // Pull from the left side or the top side based on the state for (idx <- 0 until dim) { when (state === sMoveUp) { io.outCol.bits(idx) := regArray(0)(idx) }.elsewhen(state === sMoveLeft) { io.outCol.bits(idx) := regArrayT(0)(idx) }.otherwise { io.outCol.bits(idx) := DontCare } } } class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val LEFT_DIR = 0.U(1.W) val UP_DIR = 1.U(1.W) class PE extends Module { val io = IO(new Bundle { val inR = Input(dataType) val inD = Input(dataType) val outL = Output(dataType) val outU = Output(dataType) val dir = Input(UInt(1.W)) val en = Input(Bool()) }) val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) io.outU := reg io.outL := reg } val pes = Seq.fill(dim,dim)(Module(new PE)) val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter val dir = RegInit(LEFT_DIR) // Wire up horizontal signals for (row <- 0 until dim; col <- 0 until dim) { val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL pes(row)(col).io.inR := right_in } // Wire up vertical signals for (row <- 0 until dim; col <- 0 until dim) { val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU pes(row)(col).io.inD := down_in } // Wire up global signals pes.flatten.foreach(_.io.dir := dir) pes.flatten.foreach(_.io.en := 
io.inRow.fire) io.outCol.valid := true.B io.inRow.ready := true.B val left_out = VecInit(pes.transpose.head.map(_.io.outL)) val up_out = VecInit(pes.head.map(_.io.outU)) io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out) when (io.inRow.fire) { counter := wrappingAdd(counter, 1.U, dim) } when (counter === (dim-1).U && io.inRow.fire) { dir := ~dir } } class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose // state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise val state = RegInit(0.U(1.W)) val countInc = io.inRow.fire || io.outCol.fire val (countValue, countWrap) = Counter(countInc, dim) io.inRow.ready := state === 0.U io.outCol.valid := state === 1.U for (i <- 0 until dim) { for (j <- 0 until dim) { when(countValue === i.U && io.inRow.fire) { regArray(i)(j) := io.inRow.bits(j) } } } for (i <- 0 until dim) { io.outCol.bits(i) := 0.U for (j <- 0 until dim) { when(countValue === j.U) { io.outCol.bits(i) := regArrayT(j)(i) } } } when (io.inRow.fire && countWrap) { state := 1.U } when (io.outCol.fire && countWrap) { state := 0.U } assert(!(state === 0.U) || !io.outCol.fire) assert(!(state === 1.U) || !io.inRow.fire) }
module PE_245( // @[Transposer.scala:100:9]
  input        clock, // @[Transposer.scala:100:9]
  input        reset, // @[Transposer.scala:100:9]
  input  [7:0] io_inR, // @[Transposer.scala:101:16]
  input  [7:0] io_inD, // @[Transposer.scala:101:16]
  output [7:0] io_outL, // @[Transposer.scala:101:16]
  output [7:0] io_outU, // @[Transposer.scala:101:16]
  input        io_dir, // @[Transposer.scala:101:16]
  input        io_en // @[Transposer.scala:101:16]
);

  wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
  wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
  wire       io_dir_0 = io_dir; // @[Transposer.scala:100:9]
  wire       io_en_0 = io_en; // @[Transposer.scala:100:9]
  wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
  wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
  wire       _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
  wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
  reg  [7:0] reg_0; // @[Transposer.scala:110:24]
  assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
  assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
  always @(posedge clock) begin // @[Transposer.scala:100:9]
    if (io_en_0) // @[Transposer.scala:100:9]
      reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
  end // @[Transposer.scala:100:9]
  assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
  assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule
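For context, a minimal sketch (hypothetical top-level name) instantiating the AlwaysOutTransposer from Transposer.scala, which internally builds the dim x dim grid of PE modules such as PE_245 above.

// Hypothetical top level; AlwaysOutTransposer comes from Transposer.scala above.
import chisel3._
import chisel3.util._
import gemmini.AlwaysOutTransposer

class TransposerExample extends Module {
  val dim = 4
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(Vec(dim, UInt(8.W))))
    val out = Decoupled(Vec(dim, UInt(8.W)))
  })

  val t = Module(new AlwaysOutTransposer(dim, UInt(8.W)))
  t.io.inRow <> io.in   // rows enter; each PE latches from the right or from below
  io.out <> t.io.outCol // transposed columns stream out (outCol.valid is tied high)
}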
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } }
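Before the generated module, a minimal plain-Scala sketch of the bookkeeping that BranchKillableQueue performs on each entry every cycle. This is illustrative only (the object and method names below are hypothetical, not part of util.scala); it models just the IsKilledByBranch / GetNewBrMask update and the non-power-of-2 WrapInc used for the enqueue/dequeue pointers, and it omits the io.flush / flush_fn term and the Chisel hardware types (masks are plain Ints here, 8-bit UInts in the design).

object BranchKillableQueueSketch {
  // IsKilledByBranch: an entry dies if its branch mask intersects the mispredict mask.
  def isKilledByBranch(mispredictMask: Int, brMask: Int): Boolean =
    (mispredictMask & brMask) != 0

  // GetNewBrMask: bits for branches that resolved (correctly) are cleared.
  def getNewBrMask(resolveMask: Int, brMask: Int): Int =
    brMask & ~resolveMask

  // WrapInc for a non-power-of-2 depth (the generated queue below has 7 entries).
  def wrapInc(value: Int, n: Int): Int =
    if (value == n - 1) 0 else value + 1

  // One cycle of the per-entry valids(i) / uops(i).br_mask update.
  def step(valid: Boolean, brMask: Int,
           resolveMask: Int, mispredictMask: Int): (Boolean, Int) = {
    val stillValid = valid && !isKilledByBranch(mispredictMask, brMask)
    val newMask    = if (stillValid) getNewBrMask(resolveMask, brMask) else brMask
    (stillValid, newMask)
  }

  def main(args: Array[String]): Unit = {
    // Entry speculated under branches 0 and 2 (mask 0b101); branch 0 resolves
    // correctly, so the entry survives with a shrunken mask.
    val (v1, m1) = step(valid = true, brMask = 0x5, resolveMask = 0x1, mispredictMask = 0x0)
    assert(v1 && m1 == 0x4)
    // Branch 2 later mispredicts: the entry is killed.
    val (v2, _) = step(valid = true, brMask = 0x4, resolveMask = 0x4, mispredictMask = 0x4)
    assert(!v2)
    // Pointer wrap for a 7-entry queue.
    assert(wrapInc(6, 7) == 0 && wrapInc(3, 7) == 4)
    println("BranchKillableQueue sketch OK")
  }
}

In the module below, this same update shows up as the per-entry valids logic driven by the 8-bit io_brupdate_b1_resolve_mask and io_brupdate_b1_mispredict_mask inputs, with the 7-entry depth visible in the _valids_WIRE_0.._valids_WIRE_6 initializers and the non-power-of-2 io_count computation.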
module BranchKillableQueue_10( // @[util.scala:448:7] input clock, // @[util.scala:448:7] input reset, // @[util.scala:448:7] output io_enq_ready, // @[util.scala:453:14] input io_enq_valid, // @[util.scala:453:14] input [6:0] io_enq_bits_uop_uopc, // @[util.scala:453:14] input [31:0] io_enq_bits_uop_inst, // @[util.scala:453:14] input [31:0] io_enq_bits_uop_debug_inst, // @[util.scala:453:14] input io_enq_bits_uop_is_rvc, // @[util.scala:453:14] input [39:0] io_enq_bits_uop_debug_pc, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_iq_type, // @[util.scala:453:14] input [9:0] io_enq_bits_uop_fu_code, // @[util.scala:453:14] input [3:0] io_enq_bits_uop_ctrl_br_type, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_ctrl_op1_sel, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_ctrl_op2_sel, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_ctrl_imm_sel, // @[util.scala:453:14] input [4:0] io_enq_bits_uop_ctrl_op_fcn, // @[util.scala:453:14] input io_enq_bits_uop_ctrl_fcn_dw, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_ctrl_csr_cmd, // @[util.scala:453:14] input io_enq_bits_uop_ctrl_is_load, // @[util.scala:453:14] input io_enq_bits_uop_ctrl_is_sta, // @[util.scala:453:14] input io_enq_bits_uop_ctrl_is_std, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_iw_state, // @[util.scala:453:14] input io_enq_bits_uop_iw_p1_poisoned, // @[util.scala:453:14] input io_enq_bits_uop_iw_p2_poisoned, // @[util.scala:453:14] input io_enq_bits_uop_is_br, // @[util.scala:453:14] input io_enq_bits_uop_is_jalr, // @[util.scala:453:14] input io_enq_bits_uop_is_jal, // @[util.scala:453:14] input io_enq_bits_uop_is_sfb, // @[util.scala:453:14] input [7:0] io_enq_bits_uop_br_mask, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_br_tag, // @[util.scala:453:14] input [3:0] io_enq_bits_uop_ftq_idx, // @[util.scala:453:14] input io_enq_bits_uop_edge_inst, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_pc_lob, // @[util.scala:453:14] input io_enq_bits_uop_taken, // @[util.scala:453:14] input [19:0] io_enq_bits_uop_imm_packed, // @[util.scala:453:14] input [11:0] io_enq_bits_uop_csr_addr, // @[util.scala:453:14] input [4:0] io_enq_bits_uop_rob_idx, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_ldq_idx, // @[util.scala:453:14] input [2:0] io_enq_bits_uop_stq_idx, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_rxq_idx, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_pdst, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_prs1, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_prs2, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_prs3, // @[util.scala:453:14] input [3:0] io_enq_bits_uop_ppred, // @[util.scala:453:14] input io_enq_bits_uop_prs1_busy, // @[util.scala:453:14] input io_enq_bits_uop_prs2_busy, // @[util.scala:453:14] input io_enq_bits_uop_prs3_busy, // @[util.scala:453:14] input io_enq_bits_uop_ppred_busy, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_stale_pdst, // @[util.scala:453:14] input io_enq_bits_uop_exception, // @[util.scala:453:14] input [63:0] io_enq_bits_uop_exc_cause, // @[util.scala:453:14] input io_enq_bits_uop_bypassable, // @[util.scala:453:14] input [4:0] io_enq_bits_uop_mem_cmd, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_mem_size, // @[util.scala:453:14] input io_enq_bits_uop_mem_signed, // @[util.scala:453:14] input io_enq_bits_uop_is_fence, // @[util.scala:453:14] input io_enq_bits_uop_is_fencei, // @[util.scala:453:14] input io_enq_bits_uop_is_amo, // @[util.scala:453:14] input io_enq_bits_uop_uses_ldq, // 
@[util.scala:453:14] input io_enq_bits_uop_uses_stq, // @[util.scala:453:14] input io_enq_bits_uop_is_sys_pc2epc, // @[util.scala:453:14] input io_enq_bits_uop_is_unique, // @[util.scala:453:14] input io_enq_bits_uop_flush_on_commit, // @[util.scala:453:14] input io_enq_bits_uop_ldst_is_rs1, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_ldst, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_lrs1, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_lrs2, // @[util.scala:453:14] input [5:0] io_enq_bits_uop_lrs3, // @[util.scala:453:14] input io_enq_bits_uop_ldst_val, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_dst_rtype, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_lrs1_rtype, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_lrs2_rtype, // @[util.scala:453:14] input io_enq_bits_uop_frs3_en, // @[util.scala:453:14] input io_enq_bits_uop_fp_val, // @[util.scala:453:14] input io_enq_bits_uop_fp_single, // @[util.scala:453:14] input io_enq_bits_uop_xcpt_pf_if, // @[util.scala:453:14] input io_enq_bits_uop_xcpt_ae_if, // @[util.scala:453:14] input io_enq_bits_uop_xcpt_ma_if, // @[util.scala:453:14] input io_enq_bits_uop_bp_debug_if, // @[util.scala:453:14] input io_enq_bits_uop_bp_xcpt_if, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_debug_fsrc, // @[util.scala:453:14] input [1:0] io_enq_bits_uop_debug_tsrc, // @[util.scala:453:14] input [64:0] io_enq_bits_data, // @[util.scala:453:14] input io_enq_bits_fflags_valid, // @[util.scala:453:14] input [6:0] io_enq_bits_fflags_bits_uop_uopc, // @[util.scala:453:14] input [31:0] io_enq_bits_fflags_bits_uop_inst, // @[util.scala:453:14] input [31:0] io_enq_bits_fflags_bits_uop_debug_inst, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_rvc, // @[util.scala:453:14] input [39:0] io_enq_bits_fflags_bits_uop_debug_pc, // @[util.scala:453:14] input [2:0] io_enq_bits_fflags_bits_uop_iq_type, // @[util.scala:453:14] input [9:0] io_enq_bits_fflags_bits_uop_fu_code, // @[util.scala:453:14] input [3:0] io_enq_bits_fflags_bits_uop_ctrl_br_type, // @[util.scala:453:14] input [1:0] io_enq_bits_fflags_bits_uop_ctrl_op1_sel, // @[util.scala:453:14] input [2:0] io_enq_bits_fflags_bits_uop_ctrl_op2_sel, // @[util.scala:453:14] input [2:0] io_enq_bits_fflags_bits_uop_ctrl_imm_sel, // @[util.scala:453:14] input [4:0] io_enq_bits_fflags_bits_uop_ctrl_op_fcn, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_ctrl_fcn_dw, // @[util.scala:453:14] input [2:0] io_enq_bits_fflags_bits_uop_ctrl_csr_cmd, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_ctrl_is_load, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_ctrl_is_sta, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_ctrl_is_std, // @[util.scala:453:14] input [1:0] io_enq_bits_fflags_bits_uop_iw_state, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_iw_p1_poisoned, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_iw_p2_poisoned, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_br, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_jalr, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_jal, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_sfb, // @[util.scala:453:14] input [7:0] io_enq_bits_fflags_bits_uop_br_mask, // @[util.scala:453:14] input [2:0] io_enq_bits_fflags_bits_uop_br_tag, // @[util.scala:453:14] input [3:0] io_enq_bits_fflags_bits_uop_ftq_idx, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_edge_inst, // @[util.scala:453:14] input [5:0] 
io_enq_bits_fflags_bits_uop_pc_lob, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_taken, // @[util.scala:453:14] input [19:0] io_enq_bits_fflags_bits_uop_imm_packed, // @[util.scala:453:14] input [11:0] io_enq_bits_fflags_bits_uop_csr_addr, // @[util.scala:453:14] input [4:0] io_enq_bits_fflags_bits_uop_rob_idx, // @[util.scala:453:14] input [2:0] io_enq_bits_fflags_bits_uop_ldq_idx, // @[util.scala:453:14] input [2:0] io_enq_bits_fflags_bits_uop_stq_idx, // @[util.scala:453:14] input [1:0] io_enq_bits_fflags_bits_uop_rxq_idx, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_pdst, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_prs1, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_prs2, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_prs3, // @[util.scala:453:14] input [3:0] io_enq_bits_fflags_bits_uop_ppred, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_prs1_busy, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_prs2_busy, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_prs3_busy, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_ppred_busy, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_stale_pdst, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_exception, // @[util.scala:453:14] input [63:0] io_enq_bits_fflags_bits_uop_exc_cause, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_bypassable, // @[util.scala:453:14] input [4:0] io_enq_bits_fflags_bits_uop_mem_cmd, // @[util.scala:453:14] input [1:0] io_enq_bits_fflags_bits_uop_mem_size, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_mem_signed, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_fence, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_fencei, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_amo, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_uses_ldq, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_uses_stq, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_sys_pc2epc, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_is_unique, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_flush_on_commit, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_ldst_is_rs1, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_ldst, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_lrs1, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_lrs2, // @[util.scala:453:14] input [5:0] io_enq_bits_fflags_bits_uop_lrs3, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_ldst_val, // @[util.scala:453:14] input [1:0] io_enq_bits_fflags_bits_uop_dst_rtype, // @[util.scala:453:14] input [1:0] io_enq_bits_fflags_bits_uop_lrs1_rtype, // @[util.scala:453:14] input [1:0] io_enq_bits_fflags_bits_uop_lrs2_rtype, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_frs3_en, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_fp_val, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_fp_single, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_xcpt_pf_if, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_xcpt_ae_if, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_xcpt_ma_if, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_bp_debug_if, // @[util.scala:453:14] input io_enq_bits_fflags_bits_uop_bp_xcpt_if, // @[util.scala:453:14] input [1:0] io_enq_bits_fflags_bits_uop_debug_fsrc, // @[util.scala:453:14] input [1:0] 
io_enq_bits_fflags_bits_uop_debug_tsrc, // @[util.scala:453:14] input [4:0] io_enq_bits_fflags_bits_flags, // @[util.scala:453:14] input io_deq_ready, // @[util.scala:453:14] output io_deq_valid, // @[util.scala:453:14] output [6:0] io_deq_bits_uop_uopc, // @[util.scala:453:14] output [31:0] io_deq_bits_uop_inst, // @[util.scala:453:14] output [31:0] io_deq_bits_uop_debug_inst, // @[util.scala:453:14] output io_deq_bits_uop_is_rvc, // @[util.scala:453:14] output [39:0] io_deq_bits_uop_debug_pc, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_iq_type, // @[util.scala:453:14] output [9:0] io_deq_bits_uop_fu_code, // @[util.scala:453:14] output [3:0] io_deq_bits_uop_ctrl_br_type, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_ctrl_op1_sel, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_ctrl_op2_sel, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_ctrl_imm_sel, // @[util.scala:453:14] output [4:0] io_deq_bits_uop_ctrl_op_fcn, // @[util.scala:453:14] output io_deq_bits_uop_ctrl_fcn_dw, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_ctrl_csr_cmd, // @[util.scala:453:14] output io_deq_bits_uop_ctrl_is_load, // @[util.scala:453:14] output io_deq_bits_uop_ctrl_is_sta, // @[util.scala:453:14] output io_deq_bits_uop_ctrl_is_std, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_iw_state, // @[util.scala:453:14] output io_deq_bits_uop_iw_p1_poisoned, // @[util.scala:453:14] output io_deq_bits_uop_iw_p2_poisoned, // @[util.scala:453:14] output io_deq_bits_uop_is_br, // @[util.scala:453:14] output io_deq_bits_uop_is_jalr, // @[util.scala:453:14] output io_deq_bits_uop_is_jal, // @[util.scala:453:14] output io_deq_bits_uop_is_sfb, // @[util.scala:453:14] output [7:0] io_deq_bits_uop_br_mask, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_br_tag, // @[util.scala:453:14] output [3:0] io_deq_bits_uop_ftq_idx, // @[util.scala:453:14] output io_deq_bits_uop_edge_inst, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_pc_lob, // @[util.scala:453:14] output io_deq_bits_uop_taken, // @[util.scala:453:14] output [19:0] io_deq_bits_uop_imm_packed, // @[util.scala:453:14] output [11:0] io_deq_bits_uop_csr_addr, // @[util.scala:453:14] output [4:0] io_deq_bits_uop_rob_idx, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_ldq_idx, // @[util.scala:453:14] output [2:0] io_deq_bits_uop_stq_idx, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_rxq_idx, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_pdst, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_prs1, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_prs2, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_prs3, // @[util.scala:453:14] output [3:0] io_deq_bits_uop_ppred, // @[util.scala:453:14] output io_deq_bits_uop_prs1_busy, // @[util.scala:453:14] output io_deq_bits_uop_prs2_busy, // @[util.scala:453:14] output io_deq_bits_uop_prs3_busy, // @[util.scala:453:14] output io_deq_bits_uop_ppred_busy, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_stale_pdst, // @[util.scala:453:14] output io_deq_bits_uop_exception, // @[util.scala:453:14] output [63:0] io_deq_bits_uop_exc_cause, // @[util.scala:453:14] output io_deq_bits_uop_bypassable, // @[util.scala:453:14] output [4:0] io_deq_bits_uop_mem_cmd, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_mem_size, // @[util.scala:453:14] output io_deq_bits_uop_mem_signed, // @[util.scala:453:14] output io_deq_bits_uop_is_fence, // @[util.scala:453:14] output io_deq_bits_uop_is_fencei, // @[util.scala:453:14] output io_deq_bits_uop_is_amo, // 
@[util.scala:453:14] output io_deq_bits_uop_uses_ldq, // @[util.scala:453:14] output io_deq_bits_uop_uses_stq, // @[util.scala:453:14] output io_deq_bits_uop_is_sys_pc2epc, // @[util.scala:453:14] output io_deq_bits_uop_is_unique, // @[util.scala:453:14] output io_deq_bits_uop_flush_on_commit, // @[util.scala:453:14] output io_deq_bits_uop_ldst_is_rs1, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_ldst, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_lrs1, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_lrs2, // @[util.scala:453:14] output [5:0] io_deq_bits_uop_lrs3, // @[util.scala:453:14] output io_deq_bits_uop_ldst_val, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_dst_rtype, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_lrs1_rtype, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_lrs2_rtype, // @[util.scala:453:14] output io_deq_bits_uop_frs3_en, // @[util.scala:453:14] output io_deq_bits_uop_fp_val, // @[util.scala:453:14] output io_deq_bits_uop_fp_single, // @[util.scala:453:14] output io_deq_bits_uop_xcpt_pf_if, // @[util.scala:453:14] output io_deq_bits_uop_xcpt_ae_if, // @[util.scala:453:14] output io_deq_bits_uop_xcpt_ma_if, // @[util.scala:453:14] output io_deq_bits_uop_bp_debug_if, // @[util.scala:453:14] output io_deq_bits_uop_bp_xcpt_if, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_debug_fsrc, // @[util.scala:453:14] output [1:0] io_deq_bits_uop_debug_tsrc, // @[util.scala:453:14] output [64:0] io_deq_bits_data, // @[util.scala:453:14] output io_deq_bits_predicated, // @[util.scala:453:14] output io_deq_bits_fflags_valid, // @[util.scala:453:14] output [6:0] io_deq_bits_fflags_bits_uop_uopc, // @[util.scala:453:14] output [31:0] io_deq_bits_fflags_bits_uop_inst, // @[util.scala:453:14] output [31:0] io_deq_bits_fflags_bits_uop_debug_inst, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_rvc, // @[util.scala:453:14] output [39:0] io_deq_bits_fflags_bits_uop_debug_pc, // @[util.scala:453:14] output [2:0] io_deq_bits_fflags_bits_uop_iq_type, // @[util.scala:453:14] output [9:0] io_deq_bits_fflags_bits_uop_fu_code, // @[util.scala:453:14] output [3:0] io_deq_bits_fflags_bits_uop_ctrl_br_type, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_ctrl_op1_sel, // @[util.scala:453:14] output [2:0] io_deq_bits_fflags_bits_uop_ctrl_op2_sel, // @[util.scala:453:14] output [2:0] io_deq_bits_fflags_bits_uop_ctrl_imm_sel, // @[util.scala:453:14] output [4:0] io_deq_bits_fflags_bits_uop_ctrl_op_fcn, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_ctrl_fcn_dw, // @[util.scala:453:14] output [2:0] io_deq_bits_fflags_bits_uop_ctrl_csr_cmd, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_ctrl_is_load, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_ctrl_is_sta, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_ctrl_is_std, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_iw_state, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_iw_p1_poisoned, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_iw_p2_poisoned, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_br, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_jalr, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_jal, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_sfb, // @[util.scala:453:14] output [7:0] io_deq_bits_fflags_bits_uop_br_mask, // @[util.scala:453:14] output [2:0] io_deq_bits_fflags_bits_uop_br_tag, // @[util.scala:453:14] output 
[3:0] io_deq_bits_fflags_bits_uop_ftq_idx, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_edge_inst, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_pc_lob, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_taken, // @[util.scala:453:14] output [19:0] io_deq_bits_fflags_bits_uop_imm_packed, // @[util.scala:453:14] output [11:0] io_deq_bits_fflags_bits_uop_csr_addr, // @[util.scala:453:14] output [4:0] io_deq_bits_fflags_bits_uop_rob_idx, // @[util.scala:453:14] output [2:0] io_deq_bits_fflags_bits_uop_ldq_idx, // @[util.scala:453:14] output [2:0] io_deq_bits_fflags_bits_uop_stq_idx, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_rxq_idx, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_pdst, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_prs1, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_prs2, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_prs3, // @[util.scala:453:14] output [3:0] io_deq_bits_fflags_bits_uop_ppred, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_prs1_busy, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_prs2_busy, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_prs3_busy, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_ppred_busy, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_stale_pdst, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_exception, // @[util.scala:453:14] output [63:0] io_deq_bits_fflags_bits_uop_exc_cause, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_bypassable, // @[util.scala:453:14] output [4:0] io_deq_bits_fflags_bits_uop_mem_cmd, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_mem_size, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_mem_signed, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_fence, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_fencei, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_amo, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_uses_ldq, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_uses_stq, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_sys_pc2epc, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_is_unique, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_flush_on_commit, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_ldst_is_rs1, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_ldst, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_lrs1, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_lrs2, // @[util.scala:453:14] output [5:0] io_deq_bits_fflags_bits_uop_lrs3, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_ldst_val, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_dst_rtype, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_lrs1_rtype, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_lrs2_rtype, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_frs3_en, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_fp_val, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_fp_single, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_xcpt_pf_if, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_xcpt_ae_if, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_xcpt_ma_if, // @[util.scala:453:14] output 
io_deq_bits_fflags_bits_uop_bp_debug_if, // @[util.scala:453:14] output io_deq_bits_fflags_bits_uop_bp_xcpt_if, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_debug_fsrc, // @[util.scala:453:14] output [1:0] io_deq_bits_fflags_bits_uop_debug_tsrc, // @[util.scala:453:14] output [4:0] io_deq_bits_fflags_bits_flags, // @[util.scala:453:14] input [7:0] io_brupdate_b1_resolve_mask, // @[util.scala:453:14] input [7:0] io_brupdate_b1_mispredict_mask, // @[util.scala:453:14] input [6:0] io_brupdate_b2_uop_uopc, // @[util.scala:453:14] input [31:0] io_brupdate_b2_uop_inst, // @[util.scala:453:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[util.scala:453:14] input io_brupdate_b2_uop_is_rvc, // @[util.scala:453:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[util.scala:453:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[util.scala:453:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[util.scala:453:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[util.scala:453:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[util.scala:453:14] input io_brupdate_b2_uop_ctrl_is_load, // @[util.scala:453:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[util.scala:453:14] input io_brupdate_b2_uop_ctrl_is_std, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[util.scala:453:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[util.scala:453:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[util.scala:453:14] input io_brupdate_b2_uop_is_br, // @[util.scala:453:14] input io_brupdate_b2_uop_is_jalr, // @[util.scala:453:14] input io_brupdate_b2_uop_is_jal, // @[util.scala:453:14] input io_brupdate_b2_uop_is_sfb, // @[util.scala:453:14] input [7:0] io_brupdate_b2_uop_br_mask, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_br_tag, // @[util.scala:453:14] input [3:0] io_brupdate_b2_uop_ftq_idx, // @[util.scala:453:14] input io_brupdate_b2_uop_edge_inst, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[util.scala:453:14] input io_brupdate_b2_uop_taken, // @[util.scala:453:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[util.scala:453:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[util.scala:453:14] input [4:0] io_brupdate_b2_uop_rob_idx, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_ldq_idx, // @[util.scala:453:14] input [2:0] io_brupdate_b2_uop_stq_idx, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_pdst, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_prs1, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_prs2, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_prs3, // @[util.scala:453:14] input [3:0] io_brupdate_b2_uop_ppred, // @[util.scala:453:14] input io_brupdate_b2_uop_prs1_busy, // @[util.scala:453:14] input io_brupdate_b2_uop_prs2_busy, // @[util.scala:453:14] input io_brupdate_b2_uop_prs3_busy, // @[util.scala:453:14] input io_brupdate_b2_uop_ppred_busy, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_stale_pdst, // @[util.scala:453:14] input io_brupdate_b2_uop_exception, // @[util.scala:453:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[util.scala:453:14] input 
io_brupdate_b2_uop_bypassable, // @[util.scala:453:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[util.scala:453:14] input io_brupdate_b2_uop_mem_signed, // @[util.scala:453:14] input io_brupdate_b2_uop_is_fence, // @[util.scala:453:14] input io_brupdate_b2_uop_is_fencei, // @[util.scala:453:14] input io_brupdate_b2_uop_is_amo, // @[util.scala:453:14] input io_brupdate_b2_uop_uses_ldq, // @[util.scala:453:14] input io_brupdate_b2_uop_uses_stq, // @[util.scala:453:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[util.scala:453:14] input io_brupdate_b2_uop_is_unique, // @[util.scala:453:14] input io_brupdate_b2_uop_flush_on_commit, // @[util.scala:453:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_ldst, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[util.scala:453:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[util.scala:453:14] input io_brupdate_b2_uop_ldst_val, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[util.scala:453:14] input io_brupdate_b2_uop_frs3_en, // @[util.scala:453:14] input io_brupdate_b2_uop_fp_val, // @[util.scala:453:14] input io_brupdate_b2_uop_fp_single, // @[util.scala:453:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[util.scala:453:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[util.scala:453:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[util.scala:453:14] input io_brupdate_b2_uop_bp_debug_if, // @[util.scala:453:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[util.scala:453:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[util.scala:453:14] input io_brupdate_b2_valid, // @[util.scala:453:14] input io_brupdate_b2_mispredict, // @[util.scala:453:14] input io_brupdate_b2_taken, // @[util.scala:453:14] input [2:0] io_brupdate_b2_cfi_type, // @[util.scala:453:14] input [1:0] io_brupdate_b2_pc_sel, // @[util.scala:453:14] input [39:0] io_brupdate_b2_jalr_target, // @[util.scala:453:14] input [20:0] io_brupdate_b2_target_offset, // @[util.scala:453:14] input io_flush, // @[util.scala:453:14] output io_empty // @[util.scala:453:14] ); wire [460:0] _ram_ext_R0_data; // @[util.scala:464:20] wire io_enq_valid_0 = io_enq_valid; // @[util.scala:448:7] wire [6:0] io_enq_bits_uop_uopc_0 = io_enq_bits_uop_uopc; // @[util.scala:448:7] wire [31:0] io_enq_bits_uop_inst_0 = io_enq_bits_uop_inst; // @[util.scala:448:7] wire [31:0] io_enq_bits_uop_debug_inst_0 = io_enq_bits_uop_debug_inst; // @[util.scala:448:7] wire io_enq_bits_uop_is_rvc_0 = io_enq_bits_uop_is_rvc; // @[util.scala:448:7] wire [39:0] io_enq_bits_uop_debug_pc_0 = io_enq_bits_uop_debug_pc; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_iq_type_0 = io_enq_bits_uop_iq_type; // @[util.scala:448:7] wire [9:0] io_enq_bits_uop_fu_code_0 = io_enq_bits_uop_fu_code; // @[util.scala:448:7] wire [3:0] io_enq_bits_uop_ctrl_br_type_0 = io_enq_bits_uop_ctrl_br_type; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_ctrl_op1_sel_0 = io_enq_bits_uop_ctrl_op1_sel; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_ctrl_op2_sel_0 = io_enq_bits_uop_ctrl_op2_sel; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_ctrl_imm_sel_0 = io_enq_bits_uop_ctrl_imm_sel; // @[util.scala:448:7] wire [4:0] io_enq_bits_uop_ctrl_op_fcn_0 = 
io_enq_bits_uop_ctrl_op_fcn; // @[util.scala:448:7] wire io_enq_bits_uop_ctrl_fcn_dw_0 = io_enq_bits_uop_ctrl_fcn_dw; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_ctrl_csr_cmd_0 = io_enq_bits_uop_ctrl_csr_cmd; // @[util.scala:448:7] wire io_enq_bits_uop_ctrl_is_load_0 = io_enq_bits_uop_ctrl_is_load; // @[util.scala:448:7] wire io_enq_bits_uop_ctrl_is_sta_0 = io_enq_bits_uop_ctrl_is_sta; // @[util.scala:448:7] wire io_enq_bits_uop_ctrl_is_std_0 = io_enq_bits_uop_ctrl_is_std; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_iw_state_0 = io_enq_bits_uop_iw_state; // @[util.scala:448:7] wire io_enq_bits_uop_iw_p1_poisoned_0 = io_enq_bits_uop_iw_p1_poisoned; // @[util.scala:448:7] wire io_enq_bits_uop_iw_p2_poisoned_0 = io_enq_bits_uop_iw_p2_poisoned; // @[util.scala:448:7] wire io_enq_bits_uop_is_br_0 = io_enq_bits_uop_is_br; // @[util.scala:448:7] wire io_enq_bits_uop_is_jalr_0 = io_enq_bits_uop_is_jalr; // @[util.scala:448:7] wire io_enq_bits_uop_is_jal_0 = io_enq_bits_uop_is_jal; // @[util.scala:448:7] wire io_enq_bits_uop_is_sfb_0 = io_enq_bits_uop_is_sfb; // @[util.scala:448:7] wire [7:0] io_enq_bits_uop_br_mask_0 = io_enq_bits_uop_br_mask; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_br_tag_0 = io_enq_bits_uop_br_tag; // @[util.scala:448:7] wire [3:0] io_enq_bits_uop_ftq_idx_0 = io_enq_bits_uop_ftq_idx; // @[util.scala:448:7] wire io_enq_bits_uop_edge_inst_0 = io_enq_bits_uop_edge_inst; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_pc_lob_0 = io_enq_bits_uop_pc_lob; // @[util.scala:448:7] wire io_enq_bits_uop_taken_0 = io_enq_bits_uop_taken; // @[util.scala:448:7] wire [19:0] io_enq_bits_uop_imm_packed_0 = io_enq_bits_uop_imm_packed; // @[util.scala:448:7] wire [11:0] io_enq_bits_uop_csr_addr_0 = io_enq_bits_uop_csr_addr; // @[util.scala:448:7] wire [4:0] io_enq_bits_uop_rob_idx_0 = io_enq_bits_uop_rob_idx; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_ldq_idx_0 = io_enq_bits_uop_ldq_idx; // @[util.scala:448:7] wire [2:0] io_enq_bits_uop_stq_idx_0 = io_enq_bits_uop_stq_idx; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_rxq_idx_0 = io_enq_bits_uop_rxq_idx; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_pdst_0 = io_enq_bits_uop_pdst; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_prs1_0 = io_enq_bits_uop_prs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_prs2_0 = io_enq_bits_uop_prs2; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_prs3_0 = io_enq_bits_uop_prs3; // @[util.scala:448:7] wire [3:0] io_enq_bits_uop_ppred_0 = io_enq_bits_uop_ppred; // @[util.scala:448:7] wire io_enq_bits_uop_prs1_busy_0 = io_enq_bits_uop_prs1_busy; // @[util.scala:448:7] wire io_enq_bits_uop_prs2_busy_0 = io_enq_bits_uop_prs2_busy; // @[util.scala:448:7] wire io_enq_bits_uop_prs3_busy_0 = io_enq_bits_uop_prs3_busy; // @[util.scala:448:7] wire io_enq_bits_uop_ppred_busy_0 = io_enq_bits_uop_ppred_busy; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_stale_pdst_0 = io_enq_bits_uop_stale_pdst; // @[util.scala:448:7] wire io_enq_bits_uop_exception_0 = io_enq_bits_uop_exception; // @[util.scala:448:7] wire [63:0] io_enq_bits_uop_exc_cause_0 = io_enq_bits_uop_exc_cause; // @[util.scala:448:7] wire io_enq_bits_uop_bypassable_0 = io_enq_bits_uop_bypassable; // @[util.scala:448:7] wire [4:0] io_enq_bits_uop_mem_cmd_0 = io_enq_bits_uop_mem_cmd; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_mem_size_0 = io_enq_bits_uop_mem_size; // @[util.scala:448:7] wire io_enq_bits_uop_mem_signed_0 = io_enq_bits_uop_mem_signed; // @[util.scala:448:7] wire io_enq_bits_uop_is_fence_0 = 
io_enq_bits_uop_is_fence; // @[util.scala:448:7] wire io_enq_bits_uop_is_fencei_0 = io_enq_bits_uop_is_fencei; // @[util.scala:448:7] wire io_enq_bits_uop_is_amo_0 = io_enq_bits_uop_is_amo; // @[util.scala:448:7] wire io_enq_bits_uop_uses_ldq_0 = io_enq_bits_uop_uses_ldq; // @[util.scala:448:7] wire io_enq_bits_uop_uses_stq_0 = io_enq_bits_uop_uses_stq; // @[util.scala:448:7] wire io_enq_bits_uop_is_sys_pc2epc_0 = io_enq_bits_uop_is_sys_pc2epc; // @[util.scala:448:7] wire io_enq_bits_uop_is_unique_0 = io_enq_bits_uop_is_unique; // @[util.scala:448:7] wire io_enq_bits_uop_flush_on_commit_0 = io_enq_bits_uop_flush_on_commit; // @[util.scala:448:7] wire io_enq_bits_uop_ldst_is_rs1_0 = io_enq_bits_uop_ldst_is_rs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_ldst_0 = io_enq_bits_uop_ldst; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_lrs1_0 = io_enq_bits_uop_lrs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_lrs2_0 = io_enq_bits_uop_lrs2; // @[util.scala:448:7] wire [5:0] io_enq_bits_uop_lrs3_0 = io_enq_bits_uop_lrs3; // @[util.scala:448:7] wire io_enq_bits_uop_ldst_val_0 = io_enq_bits_uop_ldst_val; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_dst_rtype_0 = io_enq_bits_uop_dst_rtype; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_lrs1_rtype_0 = io_enq_bits_uop_lrs1_rtype; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_lrs2_rtype_0 = io_enq_bits_uop_lrs2_rtype; // @[util.scala:448:7] wire io_enq_bits_uop_frs3_en_0 = io_enq_bits_uop_frs3_en; // @[util.scala:448:7] wire io_enq_bits_uop_fp_val_0 = io_enq_bits_uop_fp_val; // @[util.scala:448:7] wire io_enq_bits_uop_fp_single_0 = io_enq_bits_uop_fp_single; // @[util.scala:448:7] wire io_enq_bits_uop_xcpt_pf_if_0 = io_enq_bits_uop_xcpt_pf_if; // @[util.scala:448:7] wire io_enq_bits_uop_xcpt_ae_if_0 = io_enq_bits_uop_xcpt_ae_if; // @[util.scala:448:7] wire io_enq_bits_uop_xcpt_ma_if_0 = io_enq_bits_uop_xcpt_ma_if; // @[util.scala:448:7] wire io_enq_bits_uop_bp_debug_if_0 = io_enq_bits_uop_bp_debug_if; // @[util.scala:448:7] wire io_enq_bits_uop_bp_xcpt_if_0 = io_enq_bits_uop_bp_xcpt_if; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_debug_fsrc_0 = io_enq_bits_uop_debug_fsrc; // @[util.scala:448:7] wire [1:0] io_enq_bits_uop_debug_tsrc_0 = io_enq_bits_uop_debug_tsrc; // @[util.scala:448:7] wire [64:0] io_enq_bits_data_0 = io_enq_bits_data; // @[util.scala:448:7] wire io_enq_bits_fflags_valid_0 = io_enq_bits_fflags_valid; // @[util.scala:448:7] wire [6:0] io_enq_bits_fflags_bits_uop_uopc_0 = io_enq_bits_fflags_bits_uop_uopc; // @[util.scala:448:7] wire [31:0] io_enq_bits_fflags_bits_uop_inst_0 = io_enq_bits_fflags_bits_uop_inst; // @[util.scala:448:7] wire [31:0] io_enq_bits_fflags_bits_uop_debug_inst_0 = io_enq_bits_fflags_bits_uop_debug_inst; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_rvc_0 = io_enq_bits_fflags_bits_uop_is_rvc; // @[util.scala:448:7] wire [39:0] io_enq_bits_fflags_bits_uop_debug_pc_0 = io_enq_bits_fflags_bits_uop_debug_pc; // @[util.scala:448:7] wire [2:0] io_enq_bits_fflags_bits_uop_iq_type_0 = io_enq_bits_fflags_bits_uop_iq_type; // @[util.scala:448:7] wire [9:0] io_enq_bits_fflags_bits_uop_fu_code_0 = io_enq_bits_fflags_bits_uop_fu_code; // @[util.scala:448:7] wire [3:0] io_enq_bits_fflags_bits_uop_ctrl_br_type_0 = io_enq_bits_fflags_bits_uop_ctrl_br_type; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_ctrl_op1_sel_0 = io_enq_bits_fflags_bits_uop_ctrl_op1_sel; // @[util.scala:448:7] wire [2:0] io_enq_bits_fflags_bits_uop_ctrl_op2_sel_0 = 
io_enq_bits_fflags_bits_uop_ctrl_op2_sel; // @[util.scala:448:7] wire [2:0] io_enq_bits_fflags_bits_uop_ctrl_imm_sel_0 = io_enq_bits_fflags_bits_uop_ctrl_imm_sel; // @[util.scala:448:7] wire [4:0] io_enq_bits_fflags_bits_uop_ctrl_op_fcn_0 = io_enq_bits_fflags_bits_uop_ctrl_op_fcn; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_ctrl_fcn_dw_0 = io_enq_bits_fflags_bits_uop_ctrl_fcn_dw; // @[util.scala:448:7] wire [2:0] io_enq_bits_fflags_bits_uop_ctrl_csr_cmd_0 = io_enq_bits_fflags_bits_uop_ctrl_csr_cmd; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_ctrl_is_load_0 = io_enq_bits_fflags_bits_uop_ctrl_is_load; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_ctrl_is_sta_0 = io_enq_bits_fflags_bits_uop_ctrl_is_sta; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_ctrl_is_std_0 = io_enq_bits_fflags_bits_uop_ctrl_is_std; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_iw_state_0 = io_enq_bits_fflags_bits_uop_iw_state; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_iw_p1_poisoned_0 = io_enq_bits_fflags_bits_uop_iw_p1_poisoned; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_iw_p2_poisoned_0 = io_enq_bits_fflags_bits_uop_iw_p2_poisoned; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_br_0 = io_enq_bits_fflags_bits_uop_is_br; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_jalr_0 = io_enq_bits_fflags_bits_uop_is_jalr; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_jal_0 = io_enq_bits_fflags_bits_uop_is_jal; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_sfb_0 = io_enq_bits_fflags_bits_uop_is_sfb; // @[util.scala:448:7] wire [7:0] io_enq_bits_fflags_bits_uop_br_mask_0 = io_enq_bits_fflags_bits_uop_br_mask; // @[util.scala:448:7] wire [2:0] io_enq_bits_fflags_bits_uop_br_tag_0 = io_enq_bits_fflags_bits_uop_br_tag; // @[util.scala:448:7] wire [3:0] io_enq_bits_fflags_bits_uop_ftq_idx_0 = io_enq_bits_fflags_bits_uop_ftq_idx; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_edge_inst_0 = io_enq_bits_fflags_bits_uop_edge_inst; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_pc_lob_0 = io_enq_bits_fflags_bits_uop_pc_lob; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_taken_0 = io_enq_bits_fflags_bits_uop_taken; // @[util.scala:448:7] wire [19:0] io_enq_bits_fflags_bits_uop_imm_packed_0 = io_enq_bits_fflags_bits_uop_imm_packed; // @[util.scala:448:7] wire [11:0] io_enq_bits_fflags_bits_uop_csr_addr_0 = io_enq_bits_fflags_bits_uop_csr_addr; // @[util.scala:448:7] wire [4:0] io_enq_bits_fflags_bits_uop_rob_idx_0 = io_enq_bits_fflags_bits_uop_rob_idx; // @[util.scala:448:7] wire [2:0] io_enq_bits_fflags_bits_uop_ldq_idx_0 = io_enq_bits_fflags_bits_uop_ldq_idx; // @[util.scala:448:7] wire [2:0] io_enq_bits_fflags_bits_uop_stq_idx_0 = io_enq_bits_fflags_bits_uop_stq_idx; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_rxq_idx_0 = io_enq_bits_fflags_bits_uop_rxq_idx; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_pdst_0 = io_enq_bits_fflags_bits_uop_pdst; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_prs1_0 = io_enq_bits_fflags_bits_uop_prs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_prs2_0 = io_enq_bits_fflags_bits_uop_prs2; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_prs3_0 = io_enq_bits_fflags_bits_uop_prs3; // @[util.scala:448:7] wire [3:0] io_enq_bits_fflags_bits_uop_ppred_0 = io_enq_bits_fflags_bits_uop_ppred; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_prs1_busy_0 = 
io_enq_bits_fflags_bits_uop_prs1_busy; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_prs2_busy_0 = io_enq_bits_fflags_bits_uop_prs2_busy; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_prs3_busy_0 = io_enq_bits_fflags_bits_uop_prs3_busy; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_ppred_busy_0 = io_enq_bits_fflags_bits_uop_ppred_busy; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_stale_pdst_0 = io_enq_bits_fflags_bits_uop_stale_pdst; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_exception_0 = io_enq_bits_fflags_bits_uop_exception; // @[util.scala:448:7] wire [63:0] io_enq_bits_fflags_bits_uop_exc_cause_0 = io_enq_bits_fflags_bits_uop_exc_cause; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_bypassable_0 = io_enq_bits_fflags_bits_uop_bypassable; // @[util.scala:448:7] wire [4:0] io_enq_bits_fflags_bits_uop_mem_cmd_0 = io_enq_bits_fflags_bits_uop_mem_cmd; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_mem_size_0 = io_enq_bits_fflags_bits_uop_mem_size; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_mem_signed_0 = io_enq_bits_fflags_bits_uop_mem_signed; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_fence_0 = io_enq_bits_fflags_bits_uop_is_fence; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_fencei_0 = io_enq_bits_fflags_bits_uop_is_fencei; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_amo_0 = io_enq_bits_fflags_bits_uop_is_amo; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_uses_ldq_0 = io_enq_bits_fflags_bits_uop_uses_ldq; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_uses_stq_0 = io_enq_bits_fflags_bits_uop_uses_stq; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_sys_pc2epc_0 = io_enq_bits_fflags_bits_uop_is_sys_pc2epc; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_is_unique_0 = io_enq_bits_fflags_bits_uop_is_unique; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_flush_on_commit_0 = io_enq_bits_fflags_bits_uop_flush_on_commit; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_ldst_is_rs1_0 = io_enq_bits_fflags_bits_uop_ldst_is_rs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_ldst_0 = io_enq_bits_fflags_bits_uop_ldst; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_lrs1_0 = io_enq_bits_fflags_bits_uop_lrs1; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_lrs2_0 = io_enq_bits_fflags_bits_uop_lrs2; // @[util.scala:448:7] wire [5:0] io_enq_bits_fflags_bits_uop_lrs3_0 = io_enq_bits_fflags_bits_uop_lrs3; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_ldst_val_0 = io_enq_bits_fflags_bits_uop_ldst_val; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_dst_rtype_0 = io_enq_bits_fflags_bits_uop_dst_rtype; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_lrs1_rtype_0 = io_enq_bits_fflags_bits_uop_lrs1_rtype; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_lrs2_rtype_0 = io_enq_bits_fflags_bits_uop_lrs2_rtype; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_frs3_en_0 = io_enq_bits_fflags_bits_uop_frs3_en; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_fp_val_0 = io_enq_bits_fflags_bits_uop_fp_val; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_fp_single_0 = io_enq_bits_fflags_bits_uop_fp_single; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_xcpt_pf_if_0 = io_enq_bits_fflags_bits_uop_xcpt_pf_if; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_xcpt_ae_if_0 = 
io_enq_bits_fflags_bits_uop_xcpt_ae_if; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_xcpt_ma_if_0 = io_enq_bits_fflags_bits_uop_xcpt_ma_if; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_bp_debug_if_0 = io_enq_bits_fflags_bits_uop_bp_debug_if; // @[util.scala:448:7] wire io_enq_bits_fflags_bits_uop_bp_xcpt_if_0 = io_enq_bits_fflags_bits_uop_bp_xcpt_if; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_debug_fsrc_0 = io_enq_bits_fflags_bits_uop_debug_fsrc; // @[util.scala:448:7] wire [1:0] io_enq_bits_fflags_bits_uop_debug_tsrc_0 = io_enq_bits_fflags_bits_uop_debug_tsrc; // @[util.scala:448:7] wire [4:0] io_enq_bits_fflags_bits_flags_0 = io_enq_bits_fflags_bits_flags; // @[util.scala:448:7] wire io_deq_ready_0 = io_deq_ready; // @[util.scala:448:7] wire [7:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[util.scala:448:7] wire [7:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[util.scala:448:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[util.scala:448:7] wire [31:0] io_brupdate_b2_uop_inst_0 = io_brupdate_b2_uop_inst; // @[util.scala:448:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[util.scala:448:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[util.scala:448:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[util.scala:448:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[util.scala:448:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[util.scala:448:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[util.scala:448:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[util.scala:448:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[util.scala:448:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[util.scala:448:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[util.scala:448:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[util.scala:448:7] wire [7:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[util.scala:448:7] wire [3:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[util.scala:448:7] wire 
io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[util.scala:448:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[util.scala:448:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[util.scala:448:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[util.scala:448:7] wire [4:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[util.scala:448:7] wire [3:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[util.scala:448:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[util.scala:448:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[util.scala:448:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[util.scala:448:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[util.scala:448:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[util.scala:448:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[util.scala:448:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[util.scala:448:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[util.scala:448:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[util.scala:448:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[util.scala:448:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[util.scala:448:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[util.scala:448:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[util.scala:448:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[util.scala:448:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[util.scala:448:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // 
@[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[util.scala:448:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[util.scala:448:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[util.scala:448:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[util.scala:448:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[util.scala:448:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // @[util.scala:448:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[util.scala:448:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[util.scala:448:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[util.scala:448:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[util.scala:448:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[util.scala:448:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[util.scala:448:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[util.scala:448:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[util.scala:448:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[util.scala:448:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[util.scala:448:7] wire io_flush_0 = io_flush; // @[util.scala:448:7] wire io_enq_bits_predicated = 1'h0; // @[util.scala:448:7] wire _valids_WIRE_0 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_1 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_2 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_3 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_4 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_5 = 1'h0; // @[util.scala:465:32] wire _valids_WIRE_6 = 1'h0; // @[util.scala:465:32] wire _io_enq_ready_T; // @[util.scala:504:19] wire _io_empty_T_1; // @[util.scala:473:25] wire _valids_0_T_4 = io_flush_0; // @[util.scala:448:7, :481:83] wire _valids_1_T_4 = io_flush_0; // @[util.scala:448:7, :481:83] wire _valids_2_T_4 = io_flush_0; // @[util.scala:448:7, :481:83] wire _valids_3_T_4 = io_flush_0; // @[util.scala:448:7, :481:83] wire _valids_4_T_4 = io_flush_0; // @[util.scala:448:7, :481:83] wire _valids_5_T_4 = io_flush_0; // @[util.scala:448:7, :481:83] wire _valids_6_T_4 = io_flush_0; // @[util.scala:448:7, :481:83] wire _io_deq_valid_T_6 = io_flush_0; // @[util.scala:448:7, :509:122] wire [2:0] _io_count_T_5; // @[util.scala:529:20] wire io_enq_ready_0; // @[util.scala:448:7] wire [3:0] io_deq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7] wire io_deq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7] wire io_deq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7] wire 
io_deq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7] wire io_deq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7] wire [6:0] io_deq_bits_uop_uopc_0; // @[util.scala:448:7] wire [31:0] io_deq_bits_uop_inst_0; // @[util.scala:448:7] wire [31:0] io_deq_bits_uop_debug_inst_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_rvc_0; // @[util.scala:448:7] wire [39:0] io_deq_bits_uop_debug_pc_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_iq_type_0; // @[util.scala:448:7] wire [9:0] io_deq_bits_uop_fu_code_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_iw_state_0; // @[util.scala:448:7] wire io_deq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7] wire io_deq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_br_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_jalr_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_jal_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_sfb_0; // @[util.scala:448:7] wire [7:0] io_deq_bits_uop_br_mask_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_br_tag_0; // @[util.scala:448:7] wire [3:0] io_deq_bits_uop_ftq_idx_0; // @[util.scala:448:7] wire io_deq_bits_uop_edge_inst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_pc_lob_0; // @[util.scala:448:7] wire io_deq_bits_uop_taken_0; // @[util.scala:448:7] wire [19:0] io_deq_bits_uop_imm_packed_0; // @[util.scala:448:7] wire [11:0] io_deq_bits_uop_csr_addr_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_uop_rob_idx_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_ldq_idx_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_uop_stq_idx_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_rxq_idx_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_pdst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_prs1_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_prs2_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_prs3_0; // @[util.scala:448:7] wire [3:0] io_deq_bits_uop_ppred_0; // @[util.scala:448:7] wire io_deq_bits_uop_prs1_busy_0; // @[util.scala:448:7] wire io_deq_bits_uop_prs2_busy_0; // @[util.scala:448:7] wire io_deq_bits_uop_prs3_busy_0; // @[util.scala:448:7] wire io_deq_bits_uop_ppred_busy_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_stale_pdst_0; // @[util.scala:448:7] wire io_deq_bits_uop_exception_0; // @[util.scala:448:7] wire [63:0] io_deq_bits_uop_exc_cause_0; // @[util.scala:448:7] wire io_deq_bits_uop_bypassable_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_uop_mem_cmd_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_mem_size_0; // @[util.scala:448:7] wire io_deq_bits_uop_mem_signed_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_fence_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_fencei_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_amo_0; // @[util.scala:448:7] wire io_deq_bits_uop_uses_ldq_0; // @[util.scala:448:7] wire io_deq_bits_uop_uses_stq_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7] wire io_deq_bits_uop_is_unique_0; // @[util.scala:448:7] wire io_deq_bits_uop_flush_on_commit_0; // @[util.scala:448:7] wire io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_ldst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_lrs1_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_lrs2_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_uop_lrs3_0; // @[util.scala:448:7] wire io_deq_bits_uop_ldst_val_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_dst_rtype_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7] wire [1:0] 
io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7] wire io_deq_bits_uop_frs3_en_0; // @[util.scala:448:7] wire io_deq_bits_uop_fp_val_0; // @[util.scala:448:7] wire io_deq_bits_uop_fp_single_0; // @[util.scala:448:7] wire io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7] wire io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7] wire io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7] wire io_deq_bits_uop_bp_debug_if_0; // @[util.scala:448:7] wire io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_debug_fsrc_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_uop_debug_tsrc_0; // @[util.scala:448:7] wire [3:0] io_deq_bits_fflags_bits_uop_ctrl_br_type_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_ctrl_is_load_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_ctrl_is_std_0; // @[util.scala:448:7] wire [6:0] io_deq_bits_fflags_bits_uop_uopc_0; // @[util.scala:448:7] wire [31:0] io_deq_bits_fflags_bits_uop_inst_0; // @[util.scala:448:7] wire [31:0] io_deq_bits_fflags_bits_uop_debug_inst_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_rvc_0; // @[util.scala:448:7] wire [39:0] io_deq_bits_fflags_bits_uop_debug_pc_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_fflags_bits_uop_iq_type_0; // @[util.scala:448:7] wire [9:0] io_deq_bits_fflags_bits_uop_fu_code_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_iw_state_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_br_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_jalr_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_jal_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_sfb_0; // @[util.scala:448:7] wire [7:0] io_deq_bits_fflags_bits_uop_br_mask_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_fflags_bits_uop_br_tag_0; // @[util.scala:448:7] wire [3:0] io_deq_bits_fflags_bits_uop_ftq_idx_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_edge_inst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_pc_lob_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_taken_0; // @[util.scala:448:7] wire [19:0] io_deq_bits_fflags_bits_uop_imm_packed_0; // @[util.scala:448:7] wire [11:0] io_deq_bits_fflags_bits_uop_csr_addr_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_fflags_bits_uop_rob_idx_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_fflags_bits_uop_ldq_idx_0; // @[util.scala:448:7] wire [2:0] io_deq_bits_fflags_bits_uop_stq_idx_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_rxq_idx_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_pdst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_prs1_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_prs2_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_prs3_0; // @[util.scala:448:7] wire [3:0] 
io_deq_bits_fflags_bits_uop_ppred_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_prs1_busy_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_prs2_busy_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_prs3_busy_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_ppred_busy_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_stale_pdst_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_exception_0; // @[util.scala:448:7] wire [63:0] io_deq_bits_fflags_bits_uop_exc_cause_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_bypassable_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_fflags_bits_uop_mem_cmd_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_mem_size_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_mem_signed_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_fence_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_fencei_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_amo_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_uses_ldq_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_uses_stq_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_is_unique_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_flush_on_commit_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_ldst_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_lrs1_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_lrs2_0; // @[util.scala:448:7] wire [5:0] io_deq_bits_fflags_bits_uop_lrs3_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_ldst_val_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_dst_rtype_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_lrs1_rtype_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_lrs2_rtype_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_frs3_en_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_fp_val_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_fp_single_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_bp_debug_if_0; // @[util.scala:448:7] wire io_deq_bits_fflags_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_debug_fsrc_0; // @[util.scala:448:7] wire [1:0] io_deq_bits_fflags_bits_uop_debug_tsrc_0; // @[util.scala:448:7] wire [4:0] io_deq_bits_fflags_bits_flags_0; // @[util.scala:448:7] wire io_deq_bits_fflags_valid_0; // @[util.scala:448:7] wire [64:0] io_deq_bits_data_0; // @[util.scala:448:7] wire io_deq_bits_predicated_0; // @[util.scala:448:7] wire io_deq_valid_0; // @[util.scala:448:7] wire io_empty_0; // @[util.scala:448:7] wire [2:0] io_count; // @[util.scala:448:7] wire [64:0] out_data = _ram_ext_R0_data[64:0]; // @[util.scala:464:20, :506:17] wire out_predicated = _ram_ext_R0_data[65]; // @[util.scala:464:20, :506:17] wire out_fflags_valid = _ram_ext_R0_data[66]; // @[util.scala:464:20, :506:17] wire [6:0] out_fflags_bits_uop_uopc = _ram_ext_R0_data[73:67]; // @[util.scala:464:20, :506:17] wire [31:0] out_fflags_bits_uop_inst = _ram_ext_R0_data[105:74]; // @[util.scala:464:20, :506:17] wire [31:0] 
out_fflags_bits_uop_debug_inst = _ram_ext_R0_data[137:106]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_rvc = _ram_ext_R0_data[138]; // @[util.scala:464:20, :506:17] wire [39:0] out_fflags_bits_uop_debug_pc = _ram_ext_R0_data[178:139]; // @[util.scala:464:20, :506:17] wire [2:0] out_fflags_bits_uop_iq_type = _ram_ext_R0_data[181:179]; // @[util.scala:464:20, :506:17] wire [9:0] out_fflags_bits_uop_fu_code = _ram_ext_R0_data[191:182]; // @[util.scala:464:20, :506:17] wire [3:0] out_fflags_bits_uop_ctrl_br_type = _ram_ext_R0_data[195:192]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_ctrl_op1_sel = _ram_ext_R0_data[197:196]; // @[util.scala:464:20, :506:17] wire [2:0] out_fflags_bits_uop_ctrl_op2_sel = _ram_ext_R0_data[200:198]; // @[util.scala:464:20, :506:17] wire [2:0] out_fflags_bits_uop_ctrl_imm_sel = _ram_ext_R0_data[203:201]; // @[util.scala:464:20, :506:17] wire [4:0] out_fflags_bits_uop_ctrl_op_fcn = _ram_ext_R0_data[208:204]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_ctrl_fcn_dw = _ram_ext_R0_data[209]; // @[util.scala:464:20, :506:17] wire [2:0] out_fflags_bits_uop_ctrl_csr_cmd = _ram_ext_R0_data[212:210]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_ctrl_is_load = _ram_ext_R0_data[213]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_ctrl_is_sta = _ram_ext_R0_data[214]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_ctrl_is_std = _ram_ext_R0_data[215]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_iw_state = _ram_ext_R0_data[217:216]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_iw_p1_poisoned = _ram_ext_R0_data[218]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_iw_p2_poisoned = _ram_ext_R0_data[219]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_br = _ram_ext_R0_data[220]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_jalr = _ram_ext_R0_data[221]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_jal = _ram_ext_R0_data[222]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_sfb = _ram_ext_R0_data[223]; // @[util.scala:464:20, :506:17] wire [7:0] out_fflags_bits_uop_br_mask = _ram_ext_R0_data[231:224]; // @[util.scala:464:20, :506:17] wire [2:0] out_fflags_bits_uop_br_tag = _ram_ext_R0_data[234:232]; // @[util.scala:464:20, :506:17] wire [3:0] out_fflags_bits_uop_ftq_idx = _ram_ext_R0_data[238:235]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_edge_inst = _ram_ext_R0_data[239]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_pc_lob = _ram_ext_R0_data[245:240]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_taken = _ram_ext_R0_data[246]; // @[util.scala:464:20, :506:17] wire [19:0] out_fflags_bits_uop_imm_packed = _ram_ext_R0_data[266:247]; // @[util.scala:464:20, :506:17] wire [11:0] out_fflags_bits_uop_csr_addr = _ram_ext_R0_data[278:267]; // @[util.scala:464:20, :506:17] wire [4:0] out_fflags_bits_uop_rob_idx = _ram_ext_R0_data[283:279]; // @[util.scala:464:20, :506:17] wire [2:0] out_fflags_bits_uop_ldq_idx = _ram_ext_R0_data[286:284]; // @[util.scala:464:20, :506:17] wire [2:0] out_fflags_bits_uop_stq_idx = _ram_ext_R0_data[289:287]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_rxq_idx = _ram_ext_R0_data[291:290]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_pdst = _ram_ext_R0_data[297:292]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_prs1 = _ram_ext_R0_data[303:298]; // @[util.scala:464:20, :506:17] 
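// Descriptive note on this region (inferred from the slices visible here): the out_* wires
// unpack the flattened queue entry read from the SyncReadMem read port (_ram_ext_R0_data) --
// data in bits [64:0], predicated at [65], fflags valid at [66], and the fflags micro-op
// fields packed above that, ending with the [460:456] flags slice further below.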
wire [5:0] out_fflags_bits_uop_prs2 = _ram_ext_R0_data[309:304]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_prs3 = _ram_ext_R0_data[315:310]; // @[util.scala:464:20, :506:17] wire [3:0] out_fflags_bits_uop_ppred = _ram_ext_R0_data[319:316]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_prs1_busy = _ram_ext_R0_data[320]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_prs2_busy = _ram_ext_R0_data[321]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_prs3_busy = _ram_ext_R0_data[322]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_ppred_busy = _ram_ext_R0_data[323]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_stale_pdst = _ram_ext_R0_data[329:324]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_exception = _ram_ext_R0_data[330]; // @[util.scala:464:20, :506:17] wire [63:0] out_fflags_bits_uop_exc_cause = _ram_ext_R0_data[394:331]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_bypassable = _ram_ext_R0_data[395]; // @[util.scala:464:20, :506:17] wire [4:0] out_fflags_bits_uop_mem_cmd = _ram_ext_R0_data[400:396]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_mem_size = _ram_ext_R0_data[402:401]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_mem_signed = _ram_ext_R0_data[403]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_fence = _ram_ext_R0_data[404]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_fencei = _ram_ext_R0_data[405]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_amo = _ram_ext_R0_data[406]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_uses_ldq = _ram_ext_R0_data[407]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_uses_stq = _ram_ext_R0_data[408]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_sys_pc2epc = _ram_ext_R0_data[409]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_is_unique = _ram_ext_R0_data[410]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_flush_on_commit = _ram_ext_R0_data[411]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_ldst_is_rs1 = _ram_ext_R0_data[412]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_ldst = _ram_ext_R0_data[418:413]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_lrs1 = _ram_ext_R0_data[424:419]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_lrs2 = _ram_ext_R0_data[430:425]; // @[util.scala:464:20, :506:17] wire [5:0] out_fflags_bits_uop_lrs3 = _ram_ext_R0_data[436:431]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_ldst_val = _ram_ext_R0_data[437]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_dst_rtype = _ram_ext_R0_data[439:438]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_lrs1_rtype = _ram_ext_R0_data[441:440]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_lrs2_rtype = _ram_ext_R0_data[443:442]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_frs3_en = _ram_ext_R0_data[444]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_fp_val = _ram_ext_R0_data[445]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_fp_single = _ram_ext_R0_data[446]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_xcpt_pf_if = _ram_ext_R0_data[447]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_xcpt_ae_if = _ram_ext_R0_data[448]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_xcpt_ma_if = _ram_ext_R0_data[449]; // @[util.scala:464:20, :506:17] 
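// Descriptive note on the state that follows: valids_0..valids_6 and the uops_N_* registers
// hold the per-entry valid bits and micro-op bookkeeping for this 7-entry queue, while
// enq_ptr_value, deq_ptr_value, and maybe_full (declared further down) implement the circular
// pointers. Entries appear to be branch-killable: a valid bit is cleared when
// io_brupdate_b1_mispredict_mask overlaps that entry's br_mask, or when io_flush is asserted.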
wire out_fflags_bits_uop_bp_debug_if = _ram_ext_R0_data[450]; // @[util.scala:464:20, :506:17] wire out_fflags_bits_uop_bp_xcpt_if = _ram_ext_R0_data[451]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_debug_fsrc = _ram_ext_R0_data[453:452]; // @[util.scala:464:20, :506:17] wire [1:0] out_fflags_bits_uop_debug_tsrc = _ram_ext_R0_data[455:454]; // @[util.scala:464:20, :506:17] wire [4:0] out_fflags_bits_flags = _ram_ext_R0_data[460:456]; // @[util.scala:464:20, :506:17] reg valids_0; // @[util.scala:465:24] reg valids_1; // @[util.scala:465:24] reg valids_2; // @[util.scala:465:24] reg valids_3; // @[util.scala:465:24] reg valids_4; // @[util.scala:465:24] reg valids_5; // @[util.scala:465:24] reg valids_6; // @[util.scala:465:24] reg [6:0] uops_0_uopc; // @[util.scala:466:20] reg [31:0] uops_0_inst; // @[util.scala:466:20] reg [31:0] uops_0_debug_inst; // @[util.scala:466:20] reg uops_0_is_rvc; // @[util.scala:466:20] reg [39:0] uops_0_debug_pc; // @[util.scala:466:20] reg [2:0] uops_0_iq_type; // @[util.scala:466:20] reg [9:0] uops_0_fu_code; // @[util.scala:466:20] reg [3:0] uops_0_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_0_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_0_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_0_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_0_ctrl_op_fcn; // @[util.scala:466:20] reg uops_0_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_0_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_0_ctrl_is_load; // @[util.scala:466:20] reg uops_0_ctrl_is_sta; // @[util.scala:466:20] reg uops_0_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_0_iw_state; // @[util.scala:466:20] reg uops_0_iw_p1_poisoned; // @[util.scala:466:20] reg uops_0_iw_p2_poisoned; // @[util.scala:466:20] reg uops_0_is_br; // @[util.scala:466:20] reg uops_0_is_jalr; // @[util.scala:466:20] reg uops_0_is_jal; // @[util.scala:466:20] reg uops_0_is_sfb; // @[util.scala:466:20] reg [7:0] uops_0_br_mask; // @[util.scala:466:20] reg [2:0] uops_0_br_tag; // @[util.scala:466:20] reg [3:0] uops_0_ftq_idx; // @[util.scala:466:20] reg uops_0_edge_inst; // @[util.scala:466:20] reg [5:0] uops_0_pc_lob; // @[util.scala:466:20] reg uops_0_taken; // @[util.scala:466:20] reg [19:0] uops_0_imm_packed; // @[util.scala:466:20] reg [11:0] uops_0_csr_addr; // @[util.scala:466:20] reg [4:0] uops_0_rob_idx; // @[util.scala:466:20] reg [2:0] uops_0_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_0_stq_idx; // @[util.scala:466:20] reg [1:0] uops_0_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_0_pdst; // @[util.scala:466:20] reg [5:0] uops_0_prs1; // @[util.scala:466:20] reg [5:0] uops_0_prs2; // @[util.scala:466:20] reg [5:0] uops_0_prs3; // @[util.scala:466:20] reg [3:0] uops_0_ppred; // @[util.scala:466:20] reg uops_0_prs1_busy; // @[util.scala:466:20] reg uops_0_prs2_busy; // @[util.scala:466:20] reg uops_0_prs3_busy; // @[util.scala:466:20] reg uops_0_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_0_stale_pdst; // @[util.scala:466:20] reg uops_0_exception; // @[util.scala:466:20] reg [63:0] uops_0_exc_cause; // @[util.scala:466:20] reg uops_0_bypassable; // @[util.scala:466:20] reg [4:0] uops_0_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_0_mem_size; // @[util.scala:466:20] reg uops_0_mem_signed; // @[util.scala:466:20] reg uops_0_is_fence; // @[util.scala:466:20] reg uops_0_is_fencei; // @[util.scala:466:20] reg uops_0_is_amo; // @[util.scala:466:20] reg uops_0_uses_ldq; // @[util.scala:466:20] reg uops_0_uses_stq; // @[util.scala:466:20] reg 
uops_0_is_sys_pc2epc; // @[util.scala:466:20] reg uops_0_is_unique; // @[util.scala:466:20] reg uops_0_flush_on_commit; // @[util.scala:466:20] reg uops_0_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_0_ldst; // @[util.scala:466:20] reg [5:0] uops_0_lrs1; // @[util.scala:466:20] reg [5:0] uops_0_lrs2; // @[util.scala:466:20] reg [5:0] uops_0_lrs3; // @[util.scala:466:20] reg uops_0_ldst_val; // @[util.scala:466:20] reg [1:0] uops_0_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_0_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_0_lrs2_rtype; // @[util.scala:466:20] reg uops_0_frs3_en; // @[util.scala:466:20] reg uops_0_fp_val; // @[util.scala:466:20] reg uops_0_fp_single; // @[util.scala:466:20] reg uops_0_xcpt_pf_if; // @[util.scala:466:20] reg uops_0_xcpt_ae_if; // @[util.scala:466:20] reg uops_0_xcpt_ma_if; // @[util.scala:466:20] reg uops_0_bp_debug_if; // @[util.scala:466:20] reg uops_0_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_0_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_0_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_1_uopc; // @[util.scala:466:20] reg [31:0] uops_1_inst; // @[util.scala:466:20] reg [31:0] uops_1_debug_inst; // @[util.scala:466:20] reg uops_1_is_rvc; // @[util.scala:466:20] reg [39:0] uops_1_debug_pc; // @[util.scala:466:20] reg [2:0] uops_1_iq_type; // @[util.scala:466:20] reg [9:0] uops_1_fu_code; // @[util.scala:466:20] reg [3:0] uops_1_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_1_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_1_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_1_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_1_ctrl_op_fcn; // @[util.scala:466:20] reg uops_1_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_1_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_1_ctrl_is_load; // @[util.scala:466:20] reg uops_1_ctrl_is_sta; // @[util.scala:466:20] reg uops_1_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_1_iw_state; // @[util.scala:466:20] reg uops_1_iw_p1_poisoned; // @[util.scala:466:20] reg uops_1_iw_p2_poisoned; // @[util.scala:466:20] reg uops_1_is_br; // @[util.scala:466:20] reg uops_1_is_jalr; // @[util.scala:466:20] reg uops_1_is_jal; // @[util.scala:466:20] reg uops_1_is_sfb; // @[util.scala:466:20] reg [7:0] uops_1_br_mask; // @[util.scala:466:20] reg [2:0] uops_1_br_tag; // @[util.scala:466:20] reg [3:0] uops_1_ftq_idx; // @[util.scala:466:20] reg uops_1_edge_inst; // @[util.scala:466:20] reg [5:0] uops_1_pc_lob; // @[util.scala:466:20] reg uops_1_taken; // @[util.scala:466:20] reg [19:0] uops_1_imm_packed; // @[util.scala:466:20] reg [11:0] uops_1_csr_addr; // @[util.scala:466:20] reg [4:0] uops_1_rob_idx; // @[util.scala:466:20] reg [2:0] uops_1_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_1_stq_idx; // @[util.scala:466:20] reg [1:0] uops_1_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_1_pdst; // @[util.scala:466:20] reg [5:0] uops_1_prs1; // @[util.scala:466:20] reg [5:0] uops_1_prs2; // @[util.scala:466:20] reg [5:0] uops_1_prs3; // @[util.scala:466:20] reg [3:0] uops_1_ppred; // @[util.scala:466:20] reg uops_1_prs1_busy; // @[util.scala:466:20] reg uops_1_prs2_busy; // @[util.scala:466:20] reg uops_1_prs3_busy; // @[util.scala:466:20] reg uops_1_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_1_stale_pdst; // @[util.scala:466:20] reg uops_1_exception; // @[util.scala:466:20] reg [63:0] uops_1_exc_cause; // @[util.scala:466:20] reg uops_1_bypassable; // @[util.scala:466:20] reg [4:0] uops_1_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_1_mem_size; // 
@[util.scala:466:20] reg uops_1_mem_signed; // @[util.scala:466:20] reg uops_1_is_fence; // @[util.scala:466:20] reg uops_1_is_fencei; // @[util.scala:466:20] reg uops_1_is_amo; // @[util.scala:466:20] reg uops_1_uses_ldq; // @[util.scala:466:20] reg uops_1_uses_stq; // @[util.scala:466:20] reg uops_1_is_sys_pc2epc; // @[util.scala:466:20] reg uops_1_is_unique; // @[util.scala:466:20] reg uops_1_flush_on_commit; // @[util.scala:466:20] reg uops_1_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_1_ldst; // @[util.scala:466:20] reg [5:0] uops_1_lrs1; // @[util.scala:466:20] reg [5:0] uops_1_lrs2; // @[util.scala:466:20] reg [5:0] uops_1_lrs3; // @[util.scala:466:20] reg uops_1_ldst_val; // @[util.scala:466:20] reg [1:0] uops_1_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_1_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_1_lrs2_rtype; // @[util.scala:466:20] reg uops_1_frs3_en; // @[util.scala:466:20] reg uops_1_fp_val; // @[util.scala:466:20] reg uops_1_fp_single; // @[util.scala:466:20] reg uops_1_xcpt_pf_if; // @[util.scala:466:20] reg uops_1_xcpt_ae_if; // @[util.scala:466:20] reg uops_1_xcpt_ma_if; // @[util.scala:466:20] reg uops_1_bp_debug_if; // @[util.scala:466:20] reg uops_1_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_1_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_1_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_2_uopc; // @[util.scala:466:20] reg [31:0] uops_2_inst; // @[util.scala:466:20] reg [31:0] uops_2_debug_inst; // @[util.scala:466:20] reg uops_2_is_rvc; // @[util.scala:466:20] reg [39:0] uops_2_debug_pc; // @[util.scala:466:20] reg [2:0] uops_2_iq_type; // @[util.scala:466:20] reg [9:0] uops_2_fu_code; // @[util.scala:466:20] reg [3:0] uops_2_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_2_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_2_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_2_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_2_ctrl_op_fcn; // @[util.scala:466:20] reg uops_2_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_2_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_2_ctrl_is_load; // @[util.scala:466:20] reg uops_2_ctrl_is_sta; // @[util.scala:466:20] reg uops_2_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_2_iw_state; // @[util.scala:466:20] reg uops_2_iw_p1_poisoned; // @[util.scala:466:20] reg uops_2_iw_p2_poisoned; // @[util.scala:466:20] reg uops_2_is_br; // @[util.scala:466:20] reg uops_2_is_jalr; // @[util.scala:466:20] reg uops_2_is_jal; // @[util.scala:466:20] reg uops_2_is_sfb; // @[util.scala:466:20] reg [7:0] uops_2_br_mask; // @[util.scala:466:20] reg [2:0] uops_2_br_tag; // @[util.scala:466:20] reg [3:0] uops_2_ftq_idx; // @[util.scala:466:20] reg uops_2_edge_inst; // @[util.scala:466:20] reg [5:0] uops_2_pc_lob; // @[util.scala:466:20] reg uops_2_taken; // @[util.scala:466:20] reg [19:0] uops_2_imm_packed; // @[util.scala:466:20] reg [11:0] uops_2_csr_addr; // @[util.scala:466:20] reg [4:0] uops_2_rob_idx; // @[util.scala:466:20] reg [2:0] uops_2_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_2_stq_idx; // @[util.scala:466:20] reg [1:0] uops_2_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_2_pdst; // @[util.scala:466:20] reg [5:0] uops_2_prs1; // @[util.scala:466:20] reg [5:0] uops_2_prs2; // @[util.scala:466:20] reg [5:0] uops_2_prs3; // @[util.scala:466:20] reg [3:0] uops_2_ppred; // @[util.scala:466:20] reg uops_2_prs1_busy; // @[util.scala:466:20] reg uops_2_prs2_busy; // @[util.scala:466:20] reg uops_2_prs3_busy; // @[util.scala:466:20] reg uops_2_ppred_busy; // 
@[util.scala:466:20] reg [5:0] uops_2_stale_pdst; // @[util.scala:466:20] reg uops_2_exception; // @[util.scala:466:20] reg [63:0] uops_2_exc_cause; // @[util.scala:466:20] reg uops_2_bypassable; // @[util.scala:466:20] reg [4:0] uops_2_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_2_mem_size; // @[util.scala:466:20] reg uops_2_mem_signed; // @[util.scala:466:20] reg uops_2_is_fence; // @[util.scala:466:20] reg uops_2_is_fencei; // @[util.scala:466:20] reg uops_2_is_amo; // @[util.scala:466:20] reg uops_2_uses_ldq; // @[util.scala:466:20] reg uops_2_uses_stq; // @[util.scala:466:20] reg uops_2_is_sys_pc2epc; // @[util.scala:466:20] reg uops_2_is_unique; // @[util.scala:466:20] reg uops_2_flush_on_commit; // @[util.scala:466:20] reg uops_2_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_2_ldst; // @[util.scala:466:20] reg [5:0] uops_2_lrs1; // @[util.scala:466:20] reg [5:0] uops_2_lrs2; // @[util.scala:466:20] reg [5:0] uops_2_lrs3; // @[util.scala:466:20] reg uops_2_ldst_val; // @[util.scala:466:20] reg [1:0] uops_2_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_2_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_2_lrs2_rtype; // @[util.scala:466:20] reg uops_2_frs3_en; // @[util.scala:466:20] reg uops_2_fp_val; // @[util.scala:466:20] reg uops_2_fp_single; // @[util.scala:466:20] reg uops_2_xcpt_pf_if; // @[util.scala:466:20] reg uops_2_xcpt_ae_if; // @[util.scala:466:20] reg uops_2_xcpt_ma_if; // @[util.scala:466:20] reg uops_2_bp_debug_if; // @[util.scala:466:20] reg uops_2_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_2_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_2_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_3_uopc; // @[util.scala:466:20] reg [31:0] uops_3_inst; // @[util.scala:466:20] reg [31:0] uops_3_debug_inst; // @[util.scala:466:20] reg uops_3_is_rvc; // @[util.scala:466:20] reg [39:0] uops_3_debug_pc; // @[util.scala:466:20] reg [2:0] uops_3_iq_type; // @[util.scala:466:20] reg [9:0] uops_3_fu_code; // @[util.scala:466:20] reg [3:0] uops_3_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_3_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_3_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_3_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_3_ctrl_op_fcn; // @[util.scala:466:20] reg uops_3_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_3_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_3_ctrl_is_load; // @[util.scala:466:20] reg uops_3_ctrl_is_sta; // @[util.scala:466:20] reg uops_3_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_3_iw_state; // @[util.scala:466:20] reg uops_3_iw_p1_poisoned; // @[util.scala:466:20] reg uops_3_iw_p2_poisoned; // @[util.scala:466:20] reg uops_3_is_br; // @[util.scala:466:20] reg uops_3_is_jalr; // @[util.scala:466:20] reg uops_3_is_jal; // @[util.scala:466:20] reg uops_3_is_sfb; // @[util.scala:466:20] reg [7:0] uops_3_br_mask; // @[util.scala:466:20] reg [2:0] uops_3_br_tag; // @[util.scala:466:20] reg [3:0] uops_3_ftq_idx; // @[util.scala:466:20] reg uops_3_edge_inst; // @[util.scala:466:20] reg [5:0] uops_3_pc_lob; // @[util.scala:466:20] reg uops_3_taken; // @[util.scala:466:20] reg [19:0] uops_3_imm_packed; // @[util.scala:466:20] reg [11:0] uops_3_csr_addr; // @[util.scala:466:20] reg [4:0] uops_3_rob_idx; // @[util.scala:466:20] reg [2:0] uops_3_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_3_stq_idx; // @[util.scala:466:20] reg [1:0] uops_3_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_3_pdst; // @[util.scala:466:20] reg [5:0] uops_3_prs1; // @[util.scala:466:20] reg [5:0] uops_3_prs2; 
// @[util.scala:466:20] reg [5:0] uops_3_prs3; // @[util.scala:466:20] reg [3:0] uops_3_ppred; // @[util.scala:466:20] reg uops_3_prs1_busy; // @[util.scala:466:20] reg uops_3_prs2_busy; // @[util.scala:466:20] reg uops_3_prs3_busy; // @[util.scala:466:20] reg uops_3_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_3_stale_pdst; // @[util.scala:466:20] reg uops_3_exception; // @[util.scala:466:20] reg [63:0] uops_3_exc_cause; // @[util.scala:466:20] reg uops_3_bypassable; // @[util.scala:466:20] reg [4:0] uops_3_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_3_mem_size; // @[util.scala:466:20] reg uops_3_mem_signed; // @[util.scala:466:20] reg uops_3_is_fence; // @[util.scala:466:20] reg uops_3_is_fencei; // @[util.scala:466:20] reg uops_3_is_amo; // @[util.scala:466:20] reg uops_3_uses_ldq; // @[util.scala:466:20] reg uops_3_uses_stq; // @[util.scala:466:20] reg uops_3_is_sys_pc2epc; // @[util.scala:466:20] reg uops_3_is_unique; // @[util.scala:466:20] reg uops_3_flush_on_commit; // @[util.scala:466:20] reg uops_3_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_3_ldst; // @[util.scala:466:20] reg [5:0] uops_3_lrs1; // @[util.scala:466:20] reg [5:0] uops_3_lrs2; // @[util.scala:466:20] reg [5:0] uops_3_lrs3; // @[util.scala:466:20] reg uops_3_ldst_val; // @[util.scala:466:20] reg [1:0] uops_3_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_3_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_3_lrs2_rtype; // @[util.scala:466:20] reg uops_3_frs3_en; // @[util.scala:466:20] reg uops_3_fp_val; // @[util.scala:466:20] reg uops_3_fp_single; // @[util.scala:466:20] reg uops_3_xcpt_pf_if; // @[util.scala:466:20] reg uops_3_xcpt_ae_if; // @[util.scala:466:20] reg uops_3_xcpt_ma_if; // @[util.scala:466:20] reg uops_3_bp_debug_if; // @[util.scala:466:20] reg uops_3_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_3_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_3_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_4_uopc; // @[util.scala:466:20] reg [31:0] uops_4_inst; // @[util.scala:466:20] reg [31:0] uops_4_debug_inst; // @[util.scala:466:20] reg uops_4_is_rvc; // @[util.scala:466:20] reg [39:0] uops_4_debug_pc; // @[util.scala:466:20] reg [2:0] uops_4_iq_type; // @[util.scala:466:20] reg [9:0] uops_4_fu_code; // @[util.scala:466:20] reg [3:0] uops_4_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_4_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_4_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_4_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_4_ctrl_op_fcn; // @[util.scala:466:20] reg uops_4_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_4_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_4_ctrl_is_load; // @[util.scala:466:20] reg uops_4_ctrl_is_sta; // @[util.scala:466:20] reg uops_4_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_4_iw_state; // @[util.scala:466:20] reg uops_4_iw_p1_poisoned; // @[util.scala:466:20] reg uops_4_iw_p2_poisoned; // @[util.scala:466:20] reg uops_4_is_br; // @[util.scala:466:20] reg uops_4_is_jalr; // @[util.scala:466:20] reg uops_4_is_jal; // @[util.scala:466:20] reg uops_4_is_sfb; // @[util.scala:466:20] reg [7:0] uops_4_br_mask; // @[util.scala:466:20] reg [2:0] uops_4_br_tag; // @[util.scala:466:20] reg [3:0] uops_4_ftq_idx; // @[util.scala:466:20] reg uops_4_edge_inst; // @[util.scala:466:20] reg [5:0] uops_4_pc_lob; // @[util.scala:466:20] reg uops_4_taken; // @[util.scala:466:20] reg [19:0] uops_4_imm_packed; // @[util.scala:466:20] reg [11:0] uops_4_csr_addr; // @[util.scala:466:20] reg [4:0] uops_4_rob_idx; // 
@[util.scala:466:20] reg [2:0] uops_4_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_4_stq_idx; // @[util.scala:466:20] reg [1:0] uops_4_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_4_pdst; // @[util.scala:466:20] reg [5:0] uops_4_prs1; // @[util.scala:466:20] reg [5:0] uops_4_prs2; // @[util.scala:466:20] reg [5:0] uops_4_prs3; // @[util.scala:466:20] reg [3:0] uops_4_ppred; // @[util.scala:466:20] reg uops_4_prs1_busy; // @[util.scala:466:20] reg uops_4_prs2_busy; // @[util.scala:466:20] reg uops_4_prs3_busy; // @[util.scala:466:20] reg uops_4_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_4_stale_pdst; // @[util.scala:466:20] reg uops_4_exception; // @[util.scala:466:20] reg [63:0] uops_4_exc_cause; // @[util.scala:466:20] reg uops_4_bypassable; // @[util.scala:466:20] reg [4:0] uops_4_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_4_mem_size; // @[util.scala:466:20] reg uops_4_mem_signed; // @[util.scala:466:20] reg uops_4_is_fence; // @[util.scala:466:20] reg uops_4_is_fencei; // @[util.scala:466:20] reg uops_4_is_amo; // @[util.scala:466:20] reg uops_4_uses_ldq; // @[util.scala:466:20] reg uops_4_uses_stq; // @[util.scala:466:20] reg uops_4_is_sys_pc2epc; // @[util.scala:466:20] reg uops_4_is_unique; // @[util.scala:466:20] reg uops_4_flush_on_commit; // @[util.scala:466:20] reg uops_4_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_4_ldst; // @[util.scala:466:20] reg [5:0] uops_4_lrs1; // @[util.scala:466:20] reg [5:0] uops_4_lrs2; // @[util.scala:466:20] reg [5:0] uops_4_lrs3; // @[util.scala:466:20] reg uops_4_ldst_val; // @[util.scala:466:20] reg [1:0] uops_4_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_4_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_4_lrs2_rtype; // @[util.scala:466:20] reg uops_4_frs3_en; // @[util.scala:466:20] reg uops_4_fp_val; // @[util.scala:466:20] reg uops_4_fp_single; // @[util.scala:466:20] reg uops_4_xcpt_pf_if; // @[util.scala:466:20] reg uops_4_xcpt_ae_if; // @[util.scala:466:20] reg uops_4_xcpt_ma_if; // @[util.scala:466:20] reg uops_4_bp_debug_if; // @[util.scala:466:20] reg uops_4_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_4_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_4_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_5_uopc; // @[util.scala:466:20] reg [31:0] uops_5_inst; // @[util.scala:466:20] reg [31:0] uops_5_debug_inst; // @[util.scala:466:20] reg uops_5_is_rvc; // @[util.scala:466:20] reg [39:0] uops_5_debug_pc; // @[util.scala:466:20] reg [2:0] uops_5_iq_type; // @[util.scala:466:20] reg [9:0] uops_5_fu_code; // @[util.scala:466:20] reg [3:0] uops_5_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_5_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_5_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_5_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_5_ctrl_op_fcn; // @[util.scala:466:20] reg uops_5_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_5_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_5_ctrl_is_load; // @[util.scala:466:20] reg uops_5_ctrl_is_sta; // @[util.scala:466:20] reg uops_5_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_5_iw_state; // @[util.scala:466:20] reg uops_5_iw_p1_poisoned; // @[util.scala:466:20] reg uops_5_iw_p2_poisoned; // @[util.scala:466:20] reg uops_5_is_br; // @[util.scala:466:20] reg uops_5_is_jalr; // @[util.scala:466:20] reg uops_5_is_jal; // @[util.scala:466:20] reg uops_5_is_sfb; // @[util.scala:466:20] reg [7:0] uops_5_br_mask; // @[util.scala:466:20] reg [2:0] uops_5_br_tag; // @[util.scala:466:20] reg [3:0] uops_5_ftq_idx; // 
@[util.scala:466:20] reg uops_5_edge_inst; // @[util.scala:466:20] reg [5:0] uops_5_pc_lob; // @[util.scala:466:20] reg uops_5_taken; // @[util.scala:466:20] reg [19:0] uops_5_imm_packed; // @[util.scala:466:20] reg [11:0] uops_5_csr_addr; // @[util.scala:466:20] reg [4:0] uops_5_rob_idx; // @[util.scala:466:20] reg [2:0] uops_5_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_5_stq_idx; // @[util.scala:466:20] reg [1:0] uops_5_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_5_pdst; // @[util.scala:466:20] reg [5:0] uops_5_prs1; // @[util.scala:466:20] reg [5:0] uops_5_prs2; // @[util.scala:466:20] reg [5:0] uops_5_prs3; // @[util.scala:466:20] reg [3:0] uops_5_ppred; // @[util.scala:466:20] reg uops_5_prs1_busy; // @[util.scala:466:20] reg uops_5_prs2_busy; // @[util.scala:466:20] reg uops_5_prs3_busy; // @[util.scala:466:20] reg uops_5_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_5_stale_pdst; // @[util.scala:466:20] reg uops_5_exception; // @[util.scala:466:20] reg [63:0] uops_5_exc_cause; // @[util.scala:466:20] reg uops_5_bypassable; // @[util.scala:466:20] reg [4:0] uops_5_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_5_mem_size; // @[util.scala:466:20] reg uops_5_mem_signed; // @[util.scala:466:20] reg uops_5_is_fence; // @[util.scala:466:20] reg uops_5_is_fencei; // @[util.scala:466:20] reg uops_5_is_amo; // @[util.scala:466:20] reg uops_5_uses_ldq; // @[util.scala:466:20] reg uops_5_uses_stq; // @[util.scala:466:20] reg uops_5_is_sys_pc2epc; // @[util.scala:466:20] reg uops_5_is_unique; // @[util.scala:466:20] reg uops_5_flush_on_commit; // @[util.scala:466:20] reg uops_5_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_5_ldst; // @[util.scala:466:20] reg [5:0] uops_5_lrs1; // @[util.scala:466:20] reg [5:0] uops_5_lrs2; // @[util.scala:466:20] reg [5:0] uops_5_lrs3; // @[util.scala:466:20] reg uops_5_ldst_val; // @[util.scala:466:20] reg [1:0] uops_5_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_5_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_5_lrs2_rtype; // @[util.scala:466:20] reg uops_5_frs3_en; // @[util.scala:466:20] reg uops_5_fp_val; // @[util.scala:466:20] reg uops_5_fp_single; // @[util.scala:466:20] reg uops_5_xcpt_pf_if; // @[util.scala:466:20] reg uops_5_xcpt_ae_if; // @[util.scala:466:20] reg uops_5_xcpt_ma_if; // @[util.scala:466:20] reg uops_5_bp_debug_if; // @[util.scala:466:20] reg uops_5_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_5_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_5_debug_tsrc; // @[util.scala:466:20] reg [6:0] uops_6_uopc; // @[util.scala:466:20] reg [31:0] uops_6_inst; // @[util.scala:466:20] reg [31:0] uops_6_debug_inst; // @[util.scala:466:20] reg uops_6_is_rvc; // @[util.scala:466:20] reg [39:0] uops_6_debug_pc; // @[util.scala:466:20] reg [2:0] uops_6_iq_type; // @[util.scala:466:20] reg [9:0] uops_6_fu_code; // @[util.scala:466:20] reg [3:0] uops_6_ctrl_br_type; // @[util.scala:466:20] reg [1:0] uops_6_ctrl_op1_sel; // @[util.scala:466:20] reg [2:0] uops_6_ctrl_op2_sel; // @[util.scala:466:20] reg [2:0] uops_6_ctrl_imm_sel; // @[util.scala:466:20] reg [4:0] uops_6_ctrl_op_fcn; // @[util.scala:466:20] reg uops_6_ctrl_fcn_dw; // @[util.scala:466:20] reg [2:0] uops_6_ctrl_csr_cmd; // @[util.scala:466:20] reg uops_6_ctrl_is_load; // @[util.scala:466:20] reg uops_6_ctrl_is_sta; // @[util.scala:466:20] reg uops_6_ctrl_is_std; // @[util.scala:466:20] reg [1:0] uops_6_iw_state; // @[util.scala:466:20] reg uops_6_iw_p1_poisoned; // @[util.scala:466:20] reg uops_6_iw_p2_poisoned; // @[util.scala:466:20] reg uops_6_is_br; 
// @[util.scala:466:20] reg uops_6_is_jalr; // @[util.scala:466:20] reg uops_6_is_jal; // @[util.scala:466:20] reg uops_6_is_sfb; // @[util.scala:466:20] reg [7:0] uops_6_br_mask; // @[util.scala:466:20] reg [2:0] uops_6_br_tag; // @[util.scala:466:20] reg [3:0] uops_6_ftq_idx; // @[util.scala:466:20] reg uops_6_edge_inst; // @[util.scala:466:20] reg [5:0] uops_6_pc_lob; // @[util.scala:466:20] reg uops_6_taken; // @[util.scala:466:20] reg [19:0] uops_6_imm_packed; // @[util.scala:466:20] reg [11:0] uops_6_csr_addr; // @[util.scala:466:20] reg [4:0] uops_6_rob_idx; // @[util.scala:466:20] reg [2:0] uops_6_ldq_idx; // @[util.scala:466:20] reg [2:0] uops_6_stq_idx; // @[util.scala:466:20] reg [1:0] uops_6_rxq_idx; // @[util.scala:466:20] reg [5:0] uops_6_pdst; // @[util.scala:466:20] reg [5:0] uops_6_prs1; // @[util.scala:466:20] reg [5:0] uops_6_prs2; // @[util.scala:466:20] reg [5:0] uops_6_prs3; // @[util.scala:466:20] reg [3:0] uops_6_ppred; // @[util.scala:466:20] reg uops_6_prs1_busy; // @[util.scala:466:20] reg uops_6_prs2_busy; // @[util.scala:466:20] reg uops_6_prs3_busy; // @[util.scala:466:20] reg uops_6_ppred_busy; // @[util.scala:466:20] reg [5:0] uops_6_stale_pdst; // @[util.scala:466:20] reg uops_6_exception; // @[util.scala:466:20] reg [63:0] uops_6_exc_cause; // @[util.scala:466:20] reg uops_6_bypassable; // @[util.scala:466:20] reg [4:0] uops_6_mem_cmd; // @[util.scala:466:20] reg [1:0] uops_6_mem_size; // @[util.scala:466:20] reg uops_6_mem_signed; // @[util.scala:466:20] reg uops_6_is_fence; // @[util.scala:466:20] reg uops_6_is_fencei; // @[util.scala:466:20] reg uops_6_is_amo; // @[util.scala:466:20] reg uops_6_uses_ldq; // @[util.scala:466:20] reg uops_6_uses_stq; // @[util.scala:466:20] reg uops_6_is_sys_pc2epc; // @[util.scala:466:20] reg uops_6_is_unique; // @[util.scala:466:20] reg uops_6_flush_on_commit; // @[util.scala:466:20] reg uops_6_ldst_is_rs1; // @[util.scala:466:20] reg [5:0] uops_6_ldst; // @[util.scala:466:20] reg [5:0] uops_6_lrs1; // @[util.scala:466:20] reg [5:0] uops_6_lrs2; // @[util.scala:466:20] reg [5:0] uops_6_lrs3; // @[util.scala:466:20] reg uops_6_ldst_val; // @[util.scala:466:20] reg [1:0] uops_6_dst_rtype; // @[util.scala:466:20] reg [1:0] uops_6_lrs1_rtype; // @[util.scala:466:20] reg [1:0] uops_6_lrs2_rtype; // @[util.scala:466:20] reg uops_6_frs3_en; // @[util.scala:466:20] reg uops_6_fp_val; // @[util.scala:466:20] reg uops_6_fp_single; // @[util.scala:466:20] reg uops_6_xcpt_pf_if; // @[util.scala:466:20] reg uops_6_xcpt_ae_if; // @[util.scala:466:20] reg uops_6_xcpt_ma_if; // @[util.scala:466:20] reg uops_6_bp_debug_if; // @[util.scala:466:20] reg uops_6_bp_xcpt_if; // @[util.scala:466:20] reg [1:0] uops_6_debug_fsrc; // @[util.scala:466:20] reg [1:0] uops_6_debug_tsrc; // @[util.scala:466:20] reg [2:0] enq_ptr_value; // @[Counter.scala:61:40] reg [2:0] deq_ptr_value; // @[Counter.scala:61:40] reg maybe_full; // @[util.scala:470:27] wire ptr_match = enq_ptr_value == deq_ptr_value; // @[Counter.scala:61:40] wire _io_empty_T = ~maybe_full; // @[util.scala:470:27, :473:28] assign _io_empty_T_1 = ptr_match & _io_empty_T; // @[util.scala:472:33, :473:{25,28}] assign io_empty_0 = _io_empty_T_1; // @[util.scala:448:7, :473:25] wire full = ptr_match & maybe_full; // @[util.scala:470:27, :472:33, :474:24] wire _do_enq_T = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35] wire do_enq; // @[util.scala:475:24] wire [7:0] _GEN = {{valids_0}, {valids_6}, {valids_5}, {valids_4}, {valids_3}, {valids_2}, {valids_1}, {valids_0}}; // 
@[util.scala:465:24, :476:42] wire _GEN_0 = _GEN[deq_ptr_value]; // @[Counter.scala:61:40] wire _do_deq_T = ~_GEN_0; // @[util.scala:476:42] wire _do_deq_T_1 = io_deq_ready_0 | _do_deq_T; // @[util.scala:448:7, :476:{39,42}] wire _do_deq_T_2 = ~io_empty_0; // @[util.scala:448:7, :476:69] wire _do_deq_T_3 = _do_deq_T_1 & _do_deq_T_2; // @[util.scala:476:{39,66,69}] wire do_deq; // @[util.scala:476:24] wire [7:0] _valids_0_T = io_brupdate_b1_mispredict_mask_0 & uops_0_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_0_T_1 = |_valids_0_T; // @[util.scala:118:{51,59}] wire _valids_0_T_2 = ~_valids_0_T_1; // @[util.scala:118:59, :481:32] wire _valids_0_T_3 = valids_0 & _valids_0_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_0_T_5 = ~_valids_0_T_4; // @[util.scala:481:{72,83}] wire _valids_0_T_6 = _valids_0_T_3 & _valids_0_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_0_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_0_br_mask_T_1 = uops_0_br_mask & _uops_0_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_1_T = io_brupdate_b1_mispredict_mask_0 & uops_1_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_1_T_1 = |_valids_1_T; // @[util.scala:118:{51,59}] wire _valids_1_T_2 = ~_valids_1_T_1; // @[util.scala:118:59, :481:32] wire _valids_1_T_3 = valids_1 & _valids_1_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_1_T_5 = ~_valids_1_T_4; // @[util.scala:481:{72,83}] wire _valids_1_T_6 = _valids_1_T_3 & _valids_1_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_1_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_1_br_mask_T_1 = uops_1_br_mask & _uops_1_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_2_T = io_brupdate_b1_mispredict_mask_0 & uops_2_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_2_T_1 = |_valids_2_T; // @[util.scala:118:{51,59}] wire _valids_2_T_2 = ~_valids_2_T_1; // @[util.scala:118:59, :481:32] wire _valids_2_T_3 = valids_2 & _valids_2_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_2_T_5 = ~_valids_2_T_4; // @[util.scala:481:{72,83}] wire _valids_2_T_6 = _valids_2_T_3 & _valids_2_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_2_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_2_br_mask_T_1 = uops_2_br_mask & _uops_2_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_3_T = io_brupdate_b1_mispredict_mask_0 & uops_3_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_3_T_1 = |_valids_3_T; // @[util.scala:118:{51,59}] wire _valids_3_T_2 = ~_valids_3_T_1; // @[util.scala:118:59, :481:32] wire _valids_3_T_3 = valids_3 & _valids_3_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_3_T_5 = ~_valids_3_T_4; // @[util.scala:481:{72,83}] wire _valids_3_T_6 = _valids_3_T_3 & _valids_3_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_3_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_3_br_mask_T_1 = uops_3_br_mask & _uops_3_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_4_T = io_brupdate_b1_mispredict_mask_0 & uops_4_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_4_T_1 = |_valids_4_T; // @[util.scala:118:{51,59}] wire _valids_4_T_2 = ~_valids_4_T_1; // @[util.scala:118:59, :481:32] wire _valids_4_T_3 = valids_4 & _valids_4_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_4_T_5 = ~_valids_4_T_4; // @[util.scala:481:{72,83}] wire 
_valids_4_T_6 = _valids_4_T_3 & _valids_4_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_4_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_4_br_mask_T_1 = uops_4_br_mask & _uops_4_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_5_T = io_brupdate_b1_mispredict_mask_0 & uops_5_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_5_T_1 = |_valids_5_T; // @[util.scala:118:{51,59}] wire _valids_5_T_2 = ~_valids_5_T_1; // @[util.scala:118:59, :481:32] wire _valids_5_T_3 = valids_5 & _valids_5_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_5_T_5 = ~_valids_5_T_4; // @[util.scala:481:{72,83}] wire _valids_5_T_6 = _valids_5_T_3 & _valids_5_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_5_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_5_br_mask_T_1 = uops_5_br_mask & _uops_5_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire [7:0] _valids_6_T = io_brupdate_b1_mispredict_mask_0 & uops_6_br_mask; // @[util.scala:118:51, :448:7, :466:20] wire _valids_6_T_1 = |_valids_6_T; // @[util.scala:118:{51,59}] wire _valids_6_T_2 = ~_valids_6_T_1; // @[util.scala:118:59, :481:32] wire _valids_6_T_3 = valids_6 & _valids_6_T_2; // @[util.scala:465:24, :481:{29,32}] wire _valids_6_T_5 = ~_valids_6_T_4; // @[util.scala:481:{72,83}] wire _valids_6_T_6 = _valids_6_T_3 & _valids_6_T_5; // @[util.scala:481:{29,69,72}] wire [7:0] _uops_6_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:89:23, :448:7] wire [7:0] _uops_6_br_mask_T_1 = uops_6_br_mask & _uops_6_br_mask_T; // @[util.scala:89:{21,23}, :466:20] wire wrap = enq_ptr_value == 3'h6; // @[Counter.scala:61:40, :73:24] wire [7:0] _uops_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:85:27, :89:23, :448:7] wire [7:0] _uops_br_mask_T_1 = io_enq_bits_uop_br_mask_0 & _uops_br_mask_T; // @[util.scala:85:{25,27}, :448:7] wire [3:0] _GEN_1 = {1'h0, enq_ptr_value}; // @[Counter.scala:61:40, :77:24] wire [3:0] _value_T = _GEN_1 + 4'h1; // @[Counter.scala:77:24] wire [2:0] _value_T_1 = _value_T[2:0]; // @[Counter.scala:77:24] wire wrap_1 = deq_ptr_value == 3'h6; // @[Counter.scala:61:40, :73:24] wire [3:0] _GEN_2 = {1'h0, deq_ptr_value}; // @[Counter.scala:61:40, :77:24] wire [3:0] _value_T_2 = _GEN_2 + 4'h1; // @[Counter.scala:77:24] wire [2:0] _value_T_3 = _value_T_2[2:0]; // @[Counter.scala:77:24] assign _io_enq_ready_T = ~full; // @[util.scala:474:24, :504:19] assign io_enq_ready_0 = _io_enq_ready_T; // @[util.scala:448:7, :504:19] wire [3:0] out_uop_ctrl_br_type; // @[util.scala:506:17] wire [1:0] out_uop_ctrl_op1_sel; // @[util.scala:506:17] wire [2:0] out_uop_ctrl_op2_sel; // @[util.scala:506:17] wire [2:0] out_uop_ctrl_imm_sel; // @[util.scala:506:17] wire [4:0] out_uop_ctrl_op_fcn; // @[util.scala:506:17] wire out_uop_ctrl_fcn_dw; // @[util.scala:506:17] wire [2:0] out_uop_ctrl_csr_cmd; // @[util.scala:506:17] wire out_uop_ctrl_is_load; // @[util.scala:506:17] wire out_uop_ctrl_is_sta; // @[util.scala:506:17] wire out_uop_ctrl_is_std; // @[util.scala:506:17] wire [6:0] out_uop_uopc; // @[util.scala:506:17] wire [31:0] out_uop_inst; // @[util.scala:506:17] wire [31:0] out_uop_debug_inst; // @[util.scala:506:17] wire out_uop_is_rvc; // @[util.scala:506:17] wire [39:0] out_uop_debug_pc; // @[util.scala:506:17] wire [2:0] out_uop_iq_type; // @[util.scala:506:17] wire [9:0] out_uop_fu_code; // @[util.scala:506:17] wire [1:0] out_uop_iw_state; // @[util.scala:506:17] wire out_uop_iw_p1_poisoned; // 
@[util.scala:506:17] wire out_uop_iw_p2_poisoned; // @[util.scala:506:17] wire out_uop_is_br; // @[util.scala:506:17] wire out_uop_is_jalr; // @[util.scala:506:17] wire out_uop_is_jal; // @[util.scala:506:17] wire out_uop_is_sfb; // @[util.scala:506:17] wire [7:0] out_uop_br_mask; // @[util.scala:506:17] wire [2:0] out_uop_br_tag; // @[util.scala:506:17] wire [3:0] out_uop_ftq_idx; // @[util.scala:506:17] wire out_uop_edge_inst; // @[util.scala:506:17] wire [5:0] out_uop_pc_lob; // @[util.scala:506:17] wire out_uop_taken; // @[util.scala:506:17] wire [19:0] out_uop_imm_packed; // @[util.scala:506:17] wire [11:0] out_uop_csr_addr; // @[util.scala:506:17] wire [4:0] out_uop_rob_idx; // @[util.scala:506:17] wire [2:0] out_uop_ldq_idx; // @[util.scala:506:17] wire [2:0] out_uop_stq_idx; // @[util.scala:506:17] wire [1:0] out_uop_rxq_idx; // @[util.scala:506:17] wire [5:0] out_uop_pdst; // @[util.scala:506:17] wire [5:0] out_uop_prs1; // @[util.scala:506:17] wire [5:0] out_uop_prs2; // @[util.scala:506:17] wire [5:0] out_uop_prs3; // @[util.scala:506:17] wire [3:0] out_uop_ppred; // @[util.scala:506:17] wire out_uop_prs1_busy; // @[util.scala:506:17] wire out_uop_prs2_busy; // @[util.scala:506:17] wire out_uop_prs3_busy; // @[util.scala:506:17] wire out_uop_ppred_busy; // @[util.scala:506:17] wire [5:0] out_uop_stale_pdst; // @[util.scala:506:17] wire out_uop_exception; // @[util.scala:506:17] wire [63:0] out_uop_exc_cause; // @[util.scala:506:17] wire out_uop_bypassable; // @[util.scala:506:17] wire [4:0] out_uop_mem_cmd; // @[util.scala:506:17] wire [1:0] out_uop_mem_size; // @[util.scala:506:17] wire out_uop_mem_signed; // @[util.scala:506:17] wire out_uop_is_fence; // @[util.scala:506:17] wire out_uop_is_fencei; // @[util.scala:506:17] wire out_uop_is_amo; // @[util.scala:506:17] wire out_uop_uses_ldq; // @[util.scala:506:17] wire out_uop_uses_stq; // @[util.scala:506:17] wire out_uop_is_sys_pc2epc; // @[util.scala:506:17] wire out_uop_is_unique; // @[util.scala:506:17] wire out_uop_flush_on_commit; // @[util.scala:506:17] wire out_uop_ldst_is_rs1; // @[util.scala:506:17] wire [5:0] out_uop_ldst; // @[util.scala:506:17] wire [5:0] out_uop_lrs1; // @[util.scala:506:17] wire [5:0] out_uop_lrs2; // @[util.scala:506:17] wire [5:0] out_uop_lrs3; // @[util.scala:506:17] wire out_uop_ldst_val; // @[util.scala:506:17] wire [1:0] out_uop_dst_rtype; // @[util.scala:506:17] wire [1:0] out_uop_lrs1_rtype; // @[util.scala:506:17] wire [1:0] out_uop_lrs2_rtype; // @[util.scala:506:17] wire out_uop_frs3_en; // @[util.scala:506:17] wire out_uop_fp_val; // @[util.scala:506:17] wire out_uop_fp_single; // @[util.scala:506:17] wire out_uop_xcpt_pf_if; // @[util.scala:506:17] wire out_uop_xcpt_ae_if; // @[util.scala:506:17] wire out_uop_xcpt_ma_if; // @[util.scala:506:17] wire out_uop_bp_debug_if; // @[util.scala:506:17] wire out_uop_bp_xcpt_if; // @[util.scala:506:17] wire [1:0] out_uop_debug_fsrc; // @[util.scala:506:17] wire [1:0] out_uop_debug_tsrc; // @[util.scala:506:17] wire [7:0][6:0] _GEN_3 = {{uops_0_uopc}, {uops_6_uopc}, {uops_5_uopc}, {uops_4_uopc}, {uops_3_uopc}, {uops_2_uopc}, {uops_1_uopc}, {uops_0_uopc}}; // @[util.scala:466:20, :508:19] assign out_uop_uopc = _GEN_3[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][31:0] _GEN_4 = {{uops_0_inst}, {uops_6_inst}, {uops_5_inst}, {uops_4_inst}, {uops_3_inst}, {uops_2_inst}, {uops_1_inst}, {uops_0_inst}}; // @[util.scala:466:20, :508:19] assign out_uop_inst = _GEN_4[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][31:0] _GEN_5 = 
{{uops_0_debug_inst}, {uops_6_debug_inst}, {uops_5_debug_inst}, {uops_4_debug_inst}, {uops_3_debug_inst}, {uops_2_debug_inst}, {uops_1_debug_inst}, {uops_0_debug_inst}}; // @[util.scala:466:20, :508:19] assign out_uop_debug_inst = _GEN_5[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_6 = {{uops_0_is_rvc}, {uops_6_is_rvc}, {uops_5_is_rvc}, {uops_4_is_rvc}, {uops_3_is_rvc}, {uops_2_is_rvc}, {uops_1_is_rvc}, {uops_0_is_rvc}}; // @[util.scala:466:20, :508:19] assign out_uop_is_rvc = _GEN_6[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][39:0] _GEN_7 = {{uops_0_debug_pc}, {uops_6_debug_pc}, {uops_5_debug_pc}, {uops_4_debug_pc}, {uops_3_debug_pc}, {uops_2_debug_pc}, {uops_1_debug_pc}, {uops_0_debug_pc}}; // @[util.scala:466:20, :508:19] assign out_uop_debug_pc = _GEN_7[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][2:0] _GEN_8 = {{uops_0_iq_type}, {uops_6_iq_type}, {uops_5_iq_type}, {uops_4_iq_type}, {uops_3_iq_type}, {uops_2_iq_type}, {uops_1_iq_type}, {uops_0_iq_type}}; // @[util.scala:466:20, :508:19] assign out_uop_iq_type = _GEN_8[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][9:0] _GEN_9 = {{uops_0_fu_code}, {uops_6_fu_code}, {uops_5_fu_code}, {uops_4_fu_code}, {uops_3_fu_code}, {uops_2_fu_code}, {uops_1_fu_code}, {uops_0_fu_code}}; // @[util.scala:466:20, :508:19] assign out_uop_fu_code = _GEN_9[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][3:0] _GEN_10 = {{uops_0_ctrl_br_type}, {uops_6_ctrl_br_type}, {uops_5_ctrl_br_type}, {uops_4_ctrl_br_type}, {uops_3_ctrl_br_type}, {uops_2_ctrl_br_type}, {uops_1_ctrl_br_type}, {uops_0_ctrl_br_type}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_br_type = _GEN_10[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_11 = {{uops_0_ctrl_op1_sel}, {uops_6_ctrl_op1_sel}, {uops_5_ctrl_op1_sel}, {uops_4_ctrl_op1_sel}, {uops_3_ctrl_op1_sel}, {uops_2_ctrl_op1_sel}, {uops_1_ctrl_op1_sel}, {uops_0_ctrl_op1_sel}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_op1_sel = _GEN_11[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][2:0] _GEN_12 = {{uops_0_ctrl_op2_sel}, {uops_6_ctrl_op2_sel}, {uops_5_ctrl_op2_sel}, {uops_4_ctrl_op2_sel}, {uops_3_ctrl_op2_sel}, {uops_2_ctrl_op2_sel}, {uops_1_ctrl_op2_sel}, {uops_0_ctrl_op2_sel}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_op2_sel = _GEN_12[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][2:0] _GEN_13 = {{uops_0_ctrl_imm_sel}, {uops_6_ctrl_imm_sel}, {uops_5_ctrl_imm_sel}, {uops_4_ctrl_imm_sel}, {uops_3_ctrl_imm_sel}, {uops_2_ctrl_imm_sel}, {uops_1_ctrl_imm_sel}, {uops_0_ctrl_imm_sel}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_imm_sel = _GEN_13[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][4:0] _GEN_14 = {{uops_0_ctrl_op_fcn}, {uops_6_ctrl_op_fcn}, {uops_5_ctrl_op_fcn}, {uops_4_ctrl_op_fcn}, {uops_3_ctrl_op_fcn}, {uops_2_ctrl_op_fcn}, {uops_1_ctrl_op_fcn}, {uops_0_ctrl_op_fcn}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_op_fcn = _GEN_14[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_15 = {{uops_0_ctrl_fcn_dw}, {uops_6_ctrl_fcn_dw}, {uops_5_ctrl_fcn_dw}, {uops_4_ctrl_fcn_dw}, {uops_3_ctrl_fcn_dw}, {uops_2_ctrl_fcn_dw}, {uops_1_ctrl_fcn_dw}, {uops_0_ctrl_fcn_dw}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_fcn_dw = _GEN_15[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][2:0] _GEN_16 = {{uops_0_ctrl_csr_cmd}, {uops_6_ctrl_csr_cmd}, {uops_5_ctrl_csr_cmd}, {uops_4_ctrl_csr_cmd}, {uops_3_ctrl_csr_cmd}, {uops_2_ctrl_csr_cmd}, {uops_1_ctrl_csr_cmd}, {uops_0_ctrl_csr_cmd}}; // @[util.scala:466:20, 
:508:19] assign out_uop_ctrl_csr_cmd = _GEN_16[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_17 = {{uops_0_ctrl_is_load}, {uops_6_ctrl_is_load}, {uops_5_ctrl_is_load}, {uops_4_ctrl_is_load}, {uops_3_ctrl_is_load}, {uops_2_ctrl_is_load}, {uops_1_ctrl_is_load}, {uops_0_ctrl_is_load}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_is_load = _GEN_17[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_18 = {{uops_0_ctrl_is_sta}, {uops_6_ctrl_is_sta}, {uops_5_ctrl_is_sta}, {uops_4_ctrl_is_sta}, {uops_3_ctrl_is_sta}, {uops_2_ctrl_is_sta}, {uops_1_ctrl_is_sta}, {uops_0_ctrl_is_sta}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_is_sta = _GEN_18[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_19 = {{uops_0_ctrl_is_std}, {uops_6_ctrl_is_std}, {uops_5_ctrl_is_std}, {uops_4_ctrl_is_std}, {uops_3_ctrl_is_std}, {uops_2_ctrl_is_std}, {uops_1_ctrl_is_std}, {uops_0_ctrl_is_std}}; // @[util.scala:466:20, :508:19] assign out_uop_ctrl_is_std = _GEN_19[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_20 = {{uops_0_iw_state}, {uops_6_iw_state}, {uops_5_iw_state}, {uops_4_iw_state}, {uops_3_iw_state}, {uops_2_iw_state}, {uops_1_iw_state}, {uops_0_iw_state}}; // @[util.scala:466:20, :508:19] assign out_uop_iw_state = _GEN_20[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_21 = {{uops_0_iw_p1_poisoned}, {uops_6_iw_p1_poisoned}, {uops_5_iw_p1_poisoned}, {uops_4_iw_p1_poisoned}, {uops_3_iw_p1_poisoned}, {uops_2_iw_p1_poisoned}, {uops_1_iw_p1_poisoned}, {uops_0_iw_p1_poisoned}}; // @[util.scala:466:20, :508:19] assign out_uop_iw_p1_poisoned = _GEN_21[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_22 = {{uops_0_iw_p2_poisoned}, {uops_6_iw_p2_poisoned}, {uops_5_iw_p2_poisoned}, {uops_4_iw_p2_poisoned}, {uops_3_iw_p2_poisoned}, {uops_2_iw_p2_poisoned}, {uops_1_iw_p2_poisoned}, {uops_0_iw_p2_poisoned}}; // @[util.scala:466:20, :508:19] assign out_uop_iw_p2_poisoned = _GEN_22[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_23 = {{uops_0_is_br}, {uops_6_is_br}, {uops_5_is_br}, {uops_4_is_br}, {uops_3_is_br}, {uops_2_is_br}, {uops_1_is_br}, {uops_0_is_br}}; // @[util.scala:466:20, :508:19] assign out_uop_is_br = _GEN_23[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_24 = {{uops_0_is_jalr}, {uops_6_is_jalr}, {uops_5_is_jalr}, {uops_4_is_jalr}, {uops_3_is_jalr}, {uops_2_is_jalr}, {uops_1_is_jalr}, {uops_0_is_jalr}}; // @[util.scala:466:20, :508:19] assign out_uop_is_jalr = _GEN_24[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_25 = {{uops_0_is_jal}, {uops_6_is_jal}, {uops_5_is_jal}, {uops_4_is_jal}, {uops_3_is_jal}, {uops_2_is_jal}, {uops_1_is_jal}, {uops_0_is_jal}}; // @[util.scala:466:20, :508:19] assign out_uop_is_jal = _GEN_25[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_26 = {{uops_0_is_sfb}, {uops_6_is_sfb}, {uops_5_is_sfb}, {uops_4_is_sfb}, {uops_3_is_sfb}, {uops_2_is_sfb}, {uops_1_is_sfb}, {uops_0_is_sfb}}; // @[util.scala:466:20, :508:19] assign out_uop_is_sfb = _GEN_26[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][7:0] _GEN_27 = {{uops_0_br_mask}, {uops_6_br_mask}, {uops_5_br_mask}, {uops_4_br_mask}, {uops_3_br_mask}, {uops_2_br_mask}, {uops_1_br_mask}, {uops_0_br_mask}}; // @[util.scala:466:20, :508:19] assign out_uop_br_mask = _GEN_27[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][2:0] _GEN_28 = {{uops_0_br_tag}, {uops_6_br_tag}, {uops_5_br_tag}, {uops_4_br_tag}, {uops_3_br_tag}, {uops_2_br_tag}, {uops_1_br_tag}, {uops_0_br_tag}}; // @[util.scala:466:20, :508:19] assign 
out_uop_br_tag = _GEN_28[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][3:0] _GEN_29 = {{uops_0_ftq_idx}, {uops_6_ftq_idx}, {uops_5_ftq_idx}, {uops_4_ftq_idx}, {uops_3_ftq_idx}, {uops_2_ftq_idx}, {uops_1_ftq_idx}, {uops_0_ftq_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_ftq_idx = _GEN_29[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_30 = {{uops_0_edge_inst}, {uops_6_edge_inst}, {uops_5_edge_inst}, {uops_4_edge_inst}, {uops_3_edge_inst}, {uops_2_edge_inst}, {uops_1_edge_inst}, {uops_0_edge_inst}}; // @[util.scala:466:20, :508:19] assign out_uop_edge_inst = _GEN_30[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_31 = {{uops_0_pc_lob}, {uops_6_pc_lob}, {uops_5_pc_lob}, {uops_4_pc_lob}, {uops_3_pc_lob}, {uops_2_pc_lob}, {uops_1_pc_lob}, {uops_0_pc_lob}}; // @[util.scala:466:20, :508:19] assign out_uop_pc_lob = _GEN_31[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_32 = {{uops_0_taken}, {uops_6_taken}, {uops_5_taken}, {uops_4_taken}, {uops_3_taken}, {uops_2_taken}, {uops_1_taken}, {uops_0_taken}}; // @[util.scala:466:20, :508:19] assign out_uop_taken = _GEN_32[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][19:0] _GEN_33 = {{uops_0_imm_packed}, {uops_6_imm_packed}, {uops_5_imm_packed}, {uops_4_imm_packed}, {uops_3_imm_packed}, {uops_2_imm_packed}, {uops_1_imm_packed}, {uops_0_imm_packed}}; // @[util.scala:466:20, :508:19] assign out_uop_imm_packed = _GEN_33[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][11:0] _GEN_34 = {{uops_0_csr_addr}, {uops_6_csr_addr}, {uops_5_csr_addr}, {uops_4_csr_addr}, {uops_3_csr_addr}, {uops_2_csr_addr}, {uops_1_csr_addr}, {uops_0_csr_addr}}; // @[util.scala:466:20, :508:19] assign out_uop_csr_addr = _GEN_34[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][4:0] _GEN_35 = {{uops_0_rob_idx}, {uops_6_rob_idx}, {uops_5_rob_idx}, {uops_4_rob_idx}, {uops_3_rob_idx}, {uops_2_rob_idx}, {uops_1_rob_idx}, {uops_0_rob_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_rob_idx = _GEN_35[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][2:0] _GEN_36 = {{uops_0_ldq_idx}, {uops_6_ldq_idx}, {uops_5_ldq_idx}, {uops_4_ldq_idx}, {uops_3_ldq_idx}, {uops_2_ldq_idx}, {uops_1_ldq_idx}, {uops_0_ldq_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_ldq_idx = _GEN_36[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][2:0] _GEN_37 = {{uops_0_stq_idx}, {uops_6_stq_idx}, {uops_5_stq_idx}, {uops_4_stq_idx}, {uops_3_stq_idx}, {uops_2_stq_idx}, {uops_1_stq_idx}, {uops_0_stq_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_stq_idx = _GEN_37[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_38 = {{uops_0_rxq_idx}, {uops_6_rxq_idx}, {uops_5_rxq_idx}, {uops_4_rxq_idx}, {uops_3_rxq_idx}, {uops_2_rxq_idx}, {uops_1_rxq_idx}, {uops_0_rxq_idx}}; // @[util.scala:466:20, :508:19] assign out_uop_rxq_idx = _GEN_38[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_39 = {{uops_0_pdst}, {uops_6_pdst}, {uops_5_pdst}, {uops_4_pdst}, {uops_3_pdst}, {uops_2_pdst}, {uops_1_pdst}, {uops_0_pdst}}; // @[util.scala:466:20, :508:19] assign out_uop_pdst = _GEN_39[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_40 = {{uops_0_prs1}, {uops_6_prs1}, {uops_5_prs1}, {uops_4_prs1}, {uops_3_prs1}, {uops_2_prs1}, {uops_1_prs1}, {uops_0_prs1}}; // @[util.scala:466:20, :508:19] assign out_uop_prs1 = _GEN_40[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_41 = {{uops_0_prs2}, {uops_6_prs2}, {uops_5_prs2}, {uops_4_prs2}, {uops_3_prs2}, {uops_2_prs2}, {uops_1_prs2}, {uops_0_prs2}}; // 
@[util.scala:466:20, :508:19] assign out_uop_prs2 = _GEN_41[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_42 = {{uops_0_prs3}, {uops_6_prs3}, {uops_5_prs3}, {uops_4_prs3}, {uops_3_prs3}, {uops_2_prs3}, {uops_1_prs3}, {uops_0_prs3}}; // @[util.scala:466:20, :508:19] assign out_uop_prs3 = _GEN_42[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][3:0] _GEN_43 = {{uops_0_ppred}, {uops_6_ppred}, {uops_5_ppred}, {uops_4_ppred}, {uops_3_ppred}, {uops_2_ppred}, {uops_1_ppred}, {uops_0_ppred}}; // @[util.scala:466:20, :508:19] assign out_uop_ppred = _GEN_43[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_44 = {{uops_0_prs1_busy}, {uops_6_prs1_busy}, {uops_5_prs1_busy}, {uops_4_prs1_busy}, {uops_3_prs1_busy}, {uops_2_prs1_busy}, {uops_1_prs1_busy}, {uops_0_prs1_busy}}; // @[util.scala:466:20, :508:19] assign out_uop_prs1_busy = _GEN_44[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_45 = {{uops_0_prs2_busy}, {uops_6_prs2_busy}, {uops_5_prs2_busy}, {uops_4_prs2_busy}, {uops_3_prs2_busy}, {uops_2_prs2_busy}, {uops_1_prs2_busy}, {uops_0_prs2_busy}}; // @[util.scala:466:20, :508:19] assign out_uop_prs2_busy = _GEN_45[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_46 = {{uops_0_prs3_busy}, {uops_6_prs3_busy}, {uops_5_prs3_busy}, {uops_4_prs3_busy}, {uops_3_prs3_busy}, {uops_2_prs3_busy}, {uops_1_prs3_busy}, {uops_0_prs3_busy}}; // @[util.scala:466:20, :508:19] assign out_uop_prs3_busy = _GEN_46[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_47 = {{uops_0_ppred_busy}, {uops_6_ppred_busy}, {uops_5_ppred_busy}, {uops_4_ppred_busy}, {uops_3_ppred_busy}, {uops_2_ppred_busy}, {uops_1_ppred_busy}, {uops_0_ppred_busy}}; // @[util.scala:466:20, :508:19] assign out_uop_ppred_busy = _GEN_47[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_48 = {{uops_0_stale_pdst}, {uops_6_stale_pdst}, {uops_5_stale_pdst}, {uops_4_stale_pdst}, {uops_3_stale_pdst}, {uops_2_stale_pdst}, {uops_1_stale_pdst}, {uops_0_stale_pdst}}; // @[util.scala:466:20, :508:19] assign out_uop_stale_pdst = _GEN_48[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_49 = {{uops_0_exception}, {uops_6_exception}, {uops_5_exception}, {uops_4_exception}, {uops_3_exception}, {uops_2_exception}, {uops_1_exception}, {uops_0_exception}}; // @[util.scala:466:20, :508:19] assign out_uop_exception = _GEN_49[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][63:0] _GEN_50 = {{uops_0_exc_cause}, {uops_6_exc_cause}, {uops_5_exc_cause}, {uops_4_exc_cause}, {uops_3_exc_cause}, {uops_2_exc_cause}, {uops_1_exc_cause}, {uops_0_exc_cause}}; // @[util.scala:466:20, :508:19] assign out_uop_exc_cause = _GEN_50[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_51 = {{uops_0_bypassable}, {uops_6_bypassable}, {uops_5_bypassable}, {uops_4_bypassable}, {uops_3_bypassable}, {uops_2_bypassable}, {uops_1_bypassable}, {uops_0_bypassable}}; // @[util.scala:466:20, :508:19] assign out_uop_bypassable = _GEN_51[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][4:0] _GEN_52 = {{uops_0_mem_cmd}, {uops_6_mem_cmd}, {uops_5_mem_cmd}, {uops_4_mem_cmd}, {uops_3_mem_cmd}, {uops_2_mem_cmd}, {uops_1_mem_cmd}, {uops_0_mem_cmd}}; // @[util.scala:466:20, :508:19] assign out_uop_mem_cmd = _GEN_52[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_53 = {{uops_0_mem_size}, {uops_6_mem_size}, {uops_5_mem_size}, {uops_4_mem_size}, {uops_3_mem_size}, {uops_2_mem_size}, {uops_1_mem_size}, {uops_0_mem_size}}; // @[util.scala:466:20, :508:19] assign out_uop_mem_size = _GEN_53[deq_ptr_value]; // 
@[Counter.scala:61:40] wire [7:0] _GEN_54 = {{uops_0_mem_signed}, {uops_6_mem_signed}, {uops_5_mem_signed}, {uops_4_mem_signed}, {uops_3_mem_signed}, {uops_2_mem_signed}, {uops_1_mem_signed}, {uops_0_mem_signed}}; // @[util.scala:466:20, :508:19] assign out_uop_mem_signed = _GEN_54[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_55 = {{uops_0_is_fence}, {uops_6_is_fence}, {uops_5_is_fence}, {uops_4_is_fence}, {uops_3_is_fence}, {uops_2_is_fence}, {uops_1_is_fence}, {uops_0_is_fence}}; // @[util.scala:466:20, :508:19] assign out_uop_is_fence = _GEN_55[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_56 = {{uops_0_is_fencei}, {uops_6_is_fencei}, {uops_5_is_fencei}, {uops_4_is_fencei}, {uops_3_is_fencei}, {uops_2_is_fencei}, {uops_1_is_fencei}, {uops_0_is_fencei}}; // @[util.scala:466:20, :508:19] assign out_uop_is_fencei = _GEN_56[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_57 = {{uops_0_is_amo}, {uops_6_is_amo}, {uops_5_is_amo}, {uops_4_is_amo}, {uops_3_is_amo}, {uops_2_is_amo}, {uops_1_is_amo}, {uops_0_is_amo}}; // @[util.scala:466:20, :508:19] assign out_uop_is_amo = _GEN_57[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_58 = {{uops_0_uses_ldq}, {uops_6_uses_ldq}, {uops_5_uses_ldq}, {uops_4_uses_ldq}, {uops_3_uses_ldq}, {uops_2_uses_ldq}, {uops_1_uses_ldq}, {uops_0_uses_ldq}}; // @[util.scala:466:20, :508:19] assign out_uop_uses_ldq = _GEN_58[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_59 = {{uops_0_uses_stq}, {uops_6_uses_stq}, {uops_5_uses_stq}, {uops_4_uses_stq}, {uops_3_uses_stq}, {uops_2_uses_stq}, {uops_1_uses_stq}, {uops_0_uses_stq}}; // @[util.scala:466:20, :508:19] assign out_uop_uses_stq = _GEN_59[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_60 = {{uops_0_is_sys_pc2epc}, {uops_6_is_sys_pc2epc}, {uops_5_is_sys_pc2epc}, {uops_4_is_sys_pc2epc}, {uops_3_is_sys_pc2epc}, {uops_2_is_sys_pc2epc}, {uops_1_is_sys_pc2epc}, {uops_0_is_sys_pc2epc}}; // @[util.scala:466:20, :508:19] assign out_uop_is_sys_pc2epc = _GEN_60[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_61 = {{uops_0_is_unique}, {uops_6_is_unique}, {uops_5_is_unique}, {uops_4_is_unique}, {uops_3_is_unique}, {uops_2_is_unique}, {uops_1_is_unique}, {uops_0_is_unique}}; // @[util.scala:466:20, :508:19] assign out_uop_is_unique = _GEN_61[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_62 = {{uops_0_flush_on_commit}, {uops_6_flush_on_commit}, {uops_5_flush_on_commit}, {uops_4_flush_on_commit}, {uops_3_flush_on_commit}, {uops_2_flush_on_commit}, {uops_1_flush_on_commit}, {uops_0_flush_on_commit}}; // @[util.scala:466:20, :508:19] assign out_uop_flush_on_commit = _GEN_62[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_63 = {{uops_0_ldst_is_rs1}, {uops_6_ldst_is_rs1}, {uops_5_ldst_is_rs1}, {uops_4_ldst_is_rs1}, {uops_3_ldst_is_rs1}, {uops_2_ldst_is_rs1}, {uops_1_ldst_is_rs1}, {uops_0_ldst_is_rs1}}; // @[util.scala:466:20, :508:19] assign out_uop_ldst_is_rs1 = _GEN_63[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_64 = {{uops_0_ldst}, {uops_6_ldst}, {uops_5_ldst}, {uops_4_ldst}, {uops_3_ldst}, {uops_2_ldst}, {uops_1_ldst}, {uops_0_ldst}}; // @[util.scala:466:20, :508:19] assign out_uop_ldst = _GEN_64[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_65 = {{uops_0_lrs1}, {uops_6_lrs1}, {uops_5_lrs1}, {uops_4_lrs1}, {uops_3_lrs1}, {uops_2_lrs1}, {uops_1_lrs1}, {uops_0_lrs1}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs1 = _GEN_65[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_66 = 
{{uops_0_lrs2}, {uops_6_lrs2}, {uops_5_lrs2}, {uops_4_lrs2}, {uops_3_lrs2}, {uops_2_lrs2}, {uops_1_lrs2}, {uops_0_lrs2}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs2 = _GEN_66[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][5:0] _GEN_67 = {{uops_0_lrs3}, {uops_6_lrs3}, {uops_5_lrs3}, {uops_4_lrs3}, {uops_3_lrs3}, {uops_2_lrs3}, {uops_1_lrs3}, {uops_0_lrs3}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs3 = _GEN_67[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_68 = {{uops_0_ldst_val}, {uops_6_ldst_val}, {uops_5_ldst_val}, {uops_4_ldst_val}, {uops_3_ldst_val}, {uops_2_ldst_val}, {uops_1_ldst_val}, {uops_0_ldst_val}}; // @[util.scala:466:20, :508:19] assign out_uop_ldst_val = _GEN_68[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_69 = {{uops_0_dst_rtype}, {uops_6_dst_rtype}, {uops_5_dst_rtype}, {uops_4_dst_rtype}, {uops_3_dst_rtype}, {uops_2_dst_rtype}, {uops_1_dst_rtype}, {uops_0_dst_rtype}}; // @[util.scala:466:20, :508:19] assign out_uop_dst_rtype = _GEN_69[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_70 = {{uops_0_lrs1_rtype}, {uops_6_lrs1_rtype}, {uops_5_lrs1_rtype}, {uops_4_lrs1_rtype}, {uops_3_lrs1_rtype}, {uops_2_lrs1_rtype}, {uops_1_lrs1_rtype}, {uops_0_lrs1_rtype}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs1_rtype = _GEN_70[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_71 = {{uops_0_lrs2_rtype}, {uops_6_lrs2_rtype}, {uops_5_lrs2_rtype}, {uops_4_lrs2_rtype}, {uops_3_lrs2_rtype}, {uops_2_lrs2_rtype}, {uops_1_lrs2_rtype}, {uops_0_lrs2_rtype}}; // @[util.scala:466:20, :508:19] assign out_uop_lrs2_rtype = _GEN_71[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_72 = {{uops_0_frs3_en}, {uops_6_frs3_en}, {uops_5_frs3_en}, {uops_4_frs3_en}, {uops_3_frs3_en}, {uops_2_frs3_en}, {uops_1_frs3_en}, {uops_0_frs3_en}}; // @[util.scala:466:20, :508:19] assign out_uop_frs3_en = _GEN_72[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_73 = {{uops_0_fp_val}, {uops_6_fp_val}, {uops_5_fp_val}, {uops_4_fp_val}, {uops_3_fp_val}, {uops_2_fp_val}, {uops_1_fp_val}, {uops_0_fp_val}}; // @[util.scala:466:20, :508:19] assign out_uop_fp_val = _GEN_73[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_74 = {{uops_0_fp_single}, {uops_6_fp_single}, {uops_5_fp_single}, {uops_4_fp_single}, {uops_3_fp_single}, {uops_2_fp_single}, {uops_1_fp_single}, {uops_0_fp_single}}; // @[util.scala:466:20, :508:19] assign out_uop_fp_single = _GEN_74[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_75 = {{uops_0_xcpt_pf_if}, {uops_6_xcpt_pf_if}, {uops_5_xcpt_pf_if}, {uops_4_xcpt_pf_if}, {uops_3_xcpt_pf_if}, {uops_2_xcpt_pf_if}, {uops_1_xcpt_pf_if}, {uops_0_xcpt_pf_if}}; // @[util.scala:466:20, :508:19] assign out_uop_xcpt_pf_if = _GEN_75[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_76 = {{uops_0_xcpt_ae_if}, {uops_6_xcpt_ae_if}, {uops_5_xcpt_ae_if}, {uops_4_xcpt_ae_if}, {uops_3_xcpt_ae_if}, {uops_2_xcpt_ae_if}, {uops_1_xcpt_ae_if}, {uops_0_xcpt_ae_if}}; // @[util.scala:466:20, :508:19] assign out_uop_xcpt_ae_if = _GEN_76[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_77 = {{uops_0_xcpt_ma_if}, {uops_6_xcpt_ma_if}, {uops_5_xcpt_ma_if}, {uops_4_xcpt_ma_if}, {uops_3_xcpt_ma_if}, {uops_2_xcpt_ma_if}, {uops_1_xcpt_ma_if}, {uops_0_xcpt_ma_if}}; // @[util.scala:466:20, :508:19] assign out_uop_xcpt_ma_if = _GEN_77[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_78 = {{uops_0_bp_debug_if}, {uops_6_bp_debug_if}, {uops_5_bp_debug_if}, {uops_4_bp_debug_if}, {uops_3_bp_debug_if}, 
{uops_2_bp_debug_if}, {uops_1_bp_debug_if}, {uops_0_bp_debug_if}}; // @[util.scala:466:20, :508:19] assign out_uop_bp_debug_if = _GEN_78[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0] _GEN_79 = {{uops_0_bp_xcpt_if}, {uops_6_bp_xcpt_if}, {uops_5_bp_xcpt_if}, {uops_4_bp_xcpt_if}, {uops_3_bp_xcpt_if}, {uops_2_bp_xcpt_if}, {uops_1_bp_xcpt_if}, {uops_0_bp_xcpt_if}}; // @[util.scala:466:20, :508:19] assign out_uop_bp_xcpt_if = _GEN_79[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_80 = {{uops_0_debug_fsrc}, {uops_6_debug_fsrc}, {uops_5_debug_fsrc}, {uops_4_debug_fsrc}, {uops_3_debug_fsrc}, {uops_2_debug_fsrc}, {uops_1_debug_fsrc}, {uops_0_debug_fsrc}}; // @[util.scala:466:20, :508:19] assign out_uop_debug_fsrc = _GEN_80[deq_ptr_value]; // @[Counter.scala:61:40] wire [7:0][1:0] _GEN_81 = {{uops_0_debug_tsrc}, {uops_6_debug_tsrc}, {uops_5_debug_tsrc}, {uops_4_debug_tsrc}, {uops_3_debug_tsrc}, {uops_2_debug_tsrc}, {uops_1_debug_tsrc}, {uops_0_debug_tsrc}}; // @[util.scala:466:20, :508:19] assign out_uop_debug_tsrc = _GEN_81[deq_ptr_value]; // @[Counter.scala:61:40] wire _io_deq_valid_T = ~io_empty_0; // @[util.scala:448:7, :476:69, :509:30] wire _io_deq_valid_T_1 = _io_deq_valid_T & _GEN_0; // @[util.scala:476:42, :509:{30,40}] wire [7:0] _io_deq_valid_T_2 = io_brupdate_b1_mispredict_mask_0 & out_uop_br_mask; // @[util.scala:118:51, :448:7, :506:17] wire _io_deq_valid_T_3 = |_io_deq_valid_T_2; // @[util.scala:118:{51,59}] wire _io_deq_valid_T_4 = ~_io_deq_valid_T_3; // @[util.scala:118:59, :509:68] wire _io_deq_valid_T_5 = _io_deq_valid_T_1 & _io_deq_valid_T_4; // @[util.scala:509:{40,65,68}] wire _io_deq_valid_T_7 = ~_io_deq_valid_T_6; // @[util.scala:509:{111,122}] wire _io_deq_valid_T_8 = _io_deq_valid_T_5 & _io_deq_valid_T_7; // @[util.scala:509:{65,108,111}] wire [7:0] _io_deq_bits_uop_br_mask_T = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:85:27, :89:23, :448:7] wire [7:0] _io_deq_bits_uop_br_mask_T_1 = out_uop_br_mask & _io_deq_bits_uop_br_mask_T; // @[util.scala:85:{25,27}, :506:17] assign io_deq_valid_0 = io_empty_0 ? io_enq_valid_0 : _io_deq_valid_T_8; // @[util.scala:448:7, :509:{27,108}, :515:21, :516:20] assign io_deq_bits_uop_uopc_0 = io_empty_0 ? io_enq_bits_uop_uopc_0 : out_uop_uopc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_inst_0 = io_empty_0 ? io_enq_bits_uop_inst_0 : out_uop_inst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_debug_inst_0 = io_empty_0 ? io_enq_bits_uop_debug_inst_0 : out_uop_debug_inst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_rvc_0 = io_empty_0 ? io_enq_bits_uop_is_rvc_0 : out_uop_is_rvc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_debug_pc_0 = io_empty_0 ? io_enq_bits_uop_debug_pc_0 : out_uop_debug_pc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_iq_type_0 = io_empty_0 ? io_enq_bits_uop_iq_type_0 : out_uop_iq_type; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_fu_code_0 = io_empty_0 ? io_enq_bits_uop_fu_code_0 : out_uop_fu_code; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_br_type_0 = io_empty_0 ? io_enq_bits_uop_ctrl_br_type_0 : out_uop_ctrl_br_type; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_op1_sel_0 = io_empty_0 ? 
io_enq_bits_uop_ctrl_op1_sel_0 : out_uop_ctrl_op1_sel; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_op2_sel_0 = io_empty_0 ? io_enq_bits_uop_ctrl_op2_sel_0 : out_uop_ctrl_op2_sel; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_imm_sel_0 = io_empty_0 ? io_enq_bits_uop_ctrl_imm_sel_0 : out_uop_ctrl_imm_sel; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_op_fcn_0 = io_empty_0 ? io_enq_bits_uop_ctrl_op_fcn_0 : out_uop_ctrl_op_fcn; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_fcn_dw_0 = io_empty_0 ? io_enq_bits_uop_ctrl_fcn_dw_0 : out_uop_ctrl_fcn_dw; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_csr_cmd_0 = io_empty_0 ? io_enq_bits_uop_ctrl_csr_cmd_0 : out_uop_ctrl_csr_cmd; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_is_load_0 = io_empty_0 ? io_enq_bits_uop_ctrl_is_load_0 : out_uop_ctrl_is_load; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_is_sta_0 = io_empty_0 ? io_enq_bits_uop_ctrl_is_sta_0 : out_uop_ctrl_is_sta; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ctrl_is_std_0 = io_empty_0 ? io_enq_bits_uop_ctrl_is_std_0 : out_uop_ctrl_is_std; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_iw_state_0 = io_empty_0 ? io_enq_bits_uop_iw_state_0 : out_uop_iw_state; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_iw_p1_poisoned_0 = io_empty_0 ? io_enq_bits_uop_iw_p1_poisoned_0 : out_uop_iw_p1_poisoned; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_iw_p2_poisoned_0 = io_empty_0 ? io_enq_bits_uop_iw_p2_poisoned_0 : out_uop_iw_p2_poisoned; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_br_0 = io_empty_0 ? io_enq_bits_uop_is_br_0 : out_uop_is_br; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_jalr_0 = io_empty_0 ? io_enq_bits_uop_is_jalr_0 : out_uop_is_jalr; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_jal_0 = io_empty_0 ? io_enq_bits_uop_is_jal_0 : out_uop_is_jal; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_sfb_0 = io_empty_0 ? io_enq_bits_uop_is_sfb_0 : out_uop_is_sfb; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_br_tag_0 = io_empty_0 ? io_enq_bits_uop_br_tag_0 : out_uop_br_tag; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ftq_idx_0 = io_empty_0 ? io_enq_bits_uop_ftq_idx_0 : out_uop_ftq_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_edge_inst_0 = io_empty_0 ? io_enq_bits_uop_edge_inst_0 : out_uop_edge_inst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_pc_lob_0 = io_empty_0 ? io_enq_bits_uop_pc_lob_0 : out_uop_pc_lob; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_taken_0 = io_empty_0 ? io_enq_bits_uop_taken_0 : out_uop_taken; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_imm_packed_0 = io_empty_0 ? io_enq_bits_uop_imm_packed_0 : out_uop_imm_packed; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_csr_addr_0 = io_empty_0 ? 
io_enq_bits_uop_csr_addr_0 : out_uop_csr_addr; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_rob_idx_0 = io_empty_0 ? io_enq_bits_uop_rob_idx_0 : out_uop_rob_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ldq_idx_0 = io_empty_0 ? io_enq_bits_uop_ldq_idx_0 : out_uop_ldq_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_stq_idx_0 = io_empty_0 ? io_enq_bits_uop_stq_idx_0 : out_uop_stq_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_rxq_idx_0 = io_empty_0 ? io_enq_bits_uop_rxq_idx_0 : out_uop_rxq_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_pdst_0 = io_empty_0 ? io_enq_bits_uop_pdst_0 : out_uop_pdst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_prs1_0 = io_empty_0 ? io_enq_bits_uop_prs1_0 : out_uop_prs1; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_prs2_0 = io_empty_0 ? io_enq_bits_uop_prs2_0 : out_uop_prs2; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_prs3_0 = io_empty_0 ? io_enq_bits_uop_prs3_0 : out_uop_prs3; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ppred_0 = io_empty_0 ? io_enq_bits_uop_ppred_0 : out_uop_ppred; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_prs1_busy_0 = io_empty_0 ? io_enq_bits_uop_prs1_busy_0 : out_uop_prs1_busy; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_prs2_busy_0 = io_empty_0 ? io_enq_bits_uop_prs2_busy_0 : out_uop_prs2_busy; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_prs3_busy_0 = io_empty_0 ? io_enq_bits_uop_prs3_busy_0 : out_uop_prs3_busy; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ppred_busy_0 = io_empty_0 ? io_enq_bits_uop_ppred_busy_0 : out_uop_ppred_busy; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_stale_pdst_0 = io_empty_0 ? io_enq_bits_uop_stale_pdst_0 : out_uop_stale_pdst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_exception_0 = io_empty_0 ? io_enq_bits_uop_exception_0 : out_uop_exception; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_exc_cause_0 = io_empty_0 ? io_enq_bits_uop_exc_cause_0 : out_uop_exc_cause; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_bypassable_0 = io_empty_0 ? io_enq_bits_uop_bypassable_0 : out_uop_bypassable; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_mem_cmd_0 = io_empty_0 ? io_enq_bits_uop_mem_cmd_0 : out_uop_mem_cmd; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_mem_size_0 = io_empty_0 ? io_enq_bits_uop_mem_size_0 : out_uop_mem_size; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_mem_signed_0 = io_empty_0 ? io_enq_bits_uop_mem_signed_0 : out_uop_mem_signed; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_fence_0 = io_empty_0 ? io_enq_bits_uop_is_fence_0 : out_uop_is_fence; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_fencei_0 = io_empty_0 ? io_enq_bits_uop_is_fencei_0 : out_uop_is_fencei; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_amo_0 = io_empty_0 ? 
io_enq_bits_uop_is_amo_0 : out_uop_is_amo; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_uses_ldq_0 = io_empty_0 ? io_enq_bits_uop_uses_ldq_0 : out_uop_uses_ldq; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_uses_stq_0 = io_empty_0 ? io_enq_bits_uop_uses_stq_0 : out_uop_uses_stq; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_sys_pc2epc_0 = io_empty_0 ? io_enq_bits_uop_is_sys_pc2epc_0 : out_uop_is_sys_pc2epc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_is_unique_0 = io_empty_0 ? io_enq_bits_uop_is_unique_0 : out_uop_is_unique; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_flush_on_commit_0 = io_empty_0 ? io_enq_bits_uop_flush_on_commit_0 : out_uop_flush_on_commit; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ldst_is_rs1_0 = io_empty_0 ? io_enq_bits_uop_ldst_is_rs1_0 : out_uop_ldst_is_rs1; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ldst_0 = io_empty_0 ? io_enq_bits_uop_ldst_0 : out_uop_ldst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_lrs1_0 = io_empty_0 ? io_enq_bits_uop_lrs1_0 : out_uop_lrs1; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_lrs2_0 = io_empty_0 ? io_enq_bits_uop_lrs2_0 : out_uop_lrs2; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_lrs3_0 = io_empty_0 ? io_enq_bits_uop_lrs3_0 : out_uop_lrs3; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_ldst_val_0 = io_empty_0 ? io_enq_bits_uop_ldst_val_0 : out_uop_ldst_val; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_dst_rtype_0 = io_empty_0 ? io_enq_bits_uop_dst_rtype_0 : out_uop_dst_rtype; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_lrs1_rtype_0 = io_empty_0 ? io_enq_bits_uop_lrs1_rtype_0 : out_uop_lrs1_rtype; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_lrs2_rtype_0 = io_empty_0 ? io_enq_bits_uop_lrs2_rtype_0 : out_uop_lrs2_rtype; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_frs3_en_0 = io_empty_0 ? io_enq_bits_uop_frs3_en_0 : out_uop_frs3_en; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_fp_val_0 = io_empty_0 ? io_enq_bits_uop_fp_val_0 : out_uop_fp_val; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_fp_single_0 = io_empty_0 ? io_enq_bits_uop_fp_single_0 : out_uop_fp_single; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_xcpt_pf_if_0 = io_empty_0 ? io_enq_bits_uop_xcpt_pf_if_0 : out_uop_xcpt_pf_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_xcpt_ae_if_0 = io_empty_0 ? io_enq_bits_uop_xcpt_ae_if_0 : out_uop_xcpt_ae_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_xcpt_ma_if_0 = io_empty_0 ? io_enq_bits_uop_xcpt_ma_if_0 : out_uop_xcpt_ma_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_bp_debug_if_0 = io_empty_0 ? io_enq_bits_uop_bp_debug_if_0 : out_uop_bp_debug_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_bp_xcpt_if_0 = io_empty_0 ? 
io_enq_bits_uop_bp_xcpt_if_0 : out_uop_bp_xcpt_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_debug_fsrc_0 = io_empty_0 ? io_enq_bits_uop_debug_fsrc_0 : out_uop_debug_fsrc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_uop_debug_tsrc_0 = io_empty_0 ? io_enq_bits_uop_debug_tsrc_0 : out_uop_debug_tsrc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_data_0 = io_empty_0 ? io_enq_bits_data_0 : out_data; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_predicated_0 = ~io_empty_0 & out_predicated; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_valid_0 = io_empty_0 ? io_enq_bits_fflags_valid_0 : out_fflags_valid; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_uopc_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_uopc_0 : out_fflags_bits_uop_uopc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_inst_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_inst_0 : out_fflags_bits_uop_inst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_debug_inst_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_debug_inst_0 : out_fflags_bits_uop_debug_inst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_rvc_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_rvc_0 : out_fflags_bits_uop_is_rvc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_debug_pc_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_debug_pc_0 : out_fflags_bits_uop_debug_pc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_iq_type_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_iq_type_0 : out_fflags_bits_uop_iq_type; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_fu_code_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_fu_code_0 : out_fflags_bits_uop_fu_code; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_br_type_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_br_type_0 : out_fflags_bits_uop_ctrl_br_type; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_op1_sel_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_op1_sel_0 : out_fflags_bits_uop_ctrl_op1_sel; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_op2_sel_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_op2_sel_0 : out_fflags_bits_uop_ctrl_op2_sel; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_imm_sel_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_imm_sel_0 : out_fflags_bits_uop_ctrl_imm_sel; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_op_fcn_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_op_fcn_0 : out_fflags_bits_uop_ctrl_op_fcn; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_fcn_dw_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_fcn_dw_0 : out_fflags_bits_uop_ctrl_fcn_dw; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_csr_cmd_0 = io_empty_0 ? 
io_enq_bits_fflags_bits_uop_ctrl_csr_cmd_0 : out_fflags_bits_uop_ctrl_csr_cmd; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_is_load_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_is_load_0 : out_fflags_bits_uop_ctrl_is_load; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_is_sta_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_is_sta_0 : out_fflags_bits_uop_ctrl_is_sta; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ctrl_is_std_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ctrl_is_std_0 : out_fflags_bits_uop_ctrl_is_std; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_iw_state_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_iw_state_0 : out_fflags_bits_uop_iw_state; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_iw_p1_poisoned_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_iw_p1_poisoned_0 : out_fflags_bits_uop_iw_p1_poisoned; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_iw_p2_poisoned_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_iw_p2_poisoned_0 : out_fflags_bits_uop_iw_p2_poisoned; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_br_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_br_0 : out_fflags_bits_uop_is_br; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_jalr_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_jalr_0 : out_fflags_bits_uop_is_jalr; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_jal_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_jal_0 : out_fflags_bits_uop_is_jal; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_sfb_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_sfb_0 : out_fflags_bits_uop_is_sfb; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_br_mask_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_br_mask_0 : out_fflags_bits_uop_br_mask; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_br_tag_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_br_tag_0 : out_fflags_bits_uop_br_tag; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ftq_idx_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ftq_idx_0 : out_fflags_bits_uop_ftq_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_edge_inst_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_edge_inst_0 : out_fflags_bits_uop_edge_inst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_pc_lob_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_pc_lob_0 : out_fflags_bits_uop_pc_lob; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_taken_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_taken_0 : out_fflags_bits_uop_taken; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_imm_packed_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_imm_packed_0 : out_fflags_bits_uop_imm_packed; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_csr_addr_0 = io_empty_0 ? 
io_enq_bits_fflags_bits_uop_csr_addr_0 : out_fflags_bits_uop_csr_addr; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_rob_idx_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_rob_idx_0 : out_fflags_bits_uop_rob_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ldq_idx_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ldq_idx_0 : out_fflags_bits_uop_ldq_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_stq_idx_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_stq_idx_0 : out_fflags_bits_uop_stq_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_rxq_idx_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_rxq_idx_0 : out_fflags_bits_uop_rxq_idx; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_pdst_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_pdst_0 : out_fflags_bits_uop_pdst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_prs1_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_prs1_0 : out_fflags_bits_uop_prs1; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_prs2_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_prs2_0 : out_fflags_bits_uop_prs2; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_prs3_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_prs3_0 : out_fflags_bits_uop_prs3; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ppred_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ppred_0 : out_fflags_bits_uop_ppred; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_prs1_busy_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_prs1_busy_0 : out_fflags_bits_uop_prs1_busy; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_prs2_busy_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_prs2_busy_0 : out_fflags_bits_uop_prs2_busy; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_prs3_busy_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_prs3_busy_0 : out_fflags_bits_uop_prs3_busy; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ppred_busy_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ppred_busy_0 : out_fflags_bits_uop_ppred_busy; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_stale_pdst_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_stale_pdst_0 : out_fflags_bits_uop_stale_pdst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_exception_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_exception_0 : out_fflags_bits_uop_exception; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_exc_cause_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_exc_cause_0 : out_fflags_bits_uop_exc_cause; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_bypassable_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_bypassable_0 : out_fflags_bits_uop_bypassable; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_mem_cmd_0 = io_empty_0 ? 
io_enq_bits_fflags_bits_uop_mem_cmd_0 : out_fflags_bits_uop_mem_cmd; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_mem_size_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_mem_size_0 : out_fflags_bits_uop_mem_size; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_mem_signed_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_mem_signed_0 : out_fflags_bits_uop_mem_signed; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_fence_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_fence_0 : out_fflags_bits_uop_is_fence; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_fencei_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_fencei_0 : out_fflags_bits_uop_is_fencei; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_amo_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_amo_0 : out_fflags_bits_uop_is_amo; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_uses_ldq_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_uses_ldq_0 : out_fflags_bits_uop_uses_ldq; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_uses_stq_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_uses_stq_0 : out_fflags_bits_uop_uses_stq; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_sys_pc2epc_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_sys_pc2epc_0 : out_fflags_bits_uop_is_sys_pc2epc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_is_unique_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_is_unique_0 : out_fflags_bits_uop_is_unique; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_flush_on_commit_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_flush_on_commit_0 : out_fflags_bits_uop_flush_on_commit; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ldst_is_rs1_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ldst_is_rs1_0 : out_fflags_bits_uop_ldst_is_rs1; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ldst_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ldst_0 : out_fflags_bits_uop_ldst; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_lrs1_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_lrs1_0 : out_fflags_bits_uop_lrs1; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_lrs2_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_lrs2_0 : out_fflags_bits_uop_lrs2; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_lrs3_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_lrs3_0 : out_fflags_bits_uop_lrs3; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_ldst_val_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_ldst_val_0 : out_fflags_bits_uop_ldst_val; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_dst_rtype_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_dst_rtype_0 : out_fflags_bits_uop_dst_rtype; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_lrs1_rtype_0 = io_empty_0 ? 
io_enq_bits_fflags_bits_uop_lrs1_rtype_0 : out_fflags_bits_uop_lrs1_rtype; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_lrs2_rtype_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_lrs2_rtype_0 : out_fflags_bits_uop_lrs2_rtype; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_frs3_en_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_frs3_en_0 : out_fflags_bits_uop_frs3_en; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_fp_val_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_fp_val_0 : out_fflags_bits_uop_fp_val; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_fp_single_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_fp_single_0 : out_fflags_bits_uop_fp_single; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_xcpt_pf_if_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_xcpt_pf_if_0 : out_fflags_bits_uop_xcpt_pf_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_xcpt_ae_if_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_xcpt_ae_if_0 : out_fflags_bits_uop_xcpt_ae_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_xcpt_ma_if_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_xcpt_ma_if_0 : out_fflags_bits_uop_xcpt_ma_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_bp_debug_if_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_bp_debug_if_0 : out_fflags_bits_uop_bp_debug_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_bp_xcpt_if_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_bp_xcpt_if_0 : out_fflags_bits_uop_bp_xcpt_if; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_debug_fsrc_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_debug_fsrc_0 : out_fflags_bits_uop_debug_fsrc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_uop_debug_tsrc_0 = io_empty_0 ? io_enq_bits_fflags_bits_uop_debug_tsrc_0 : out_fflags_bits_uop_debug_tsrc; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] assign io_deq_bits_fflags_bits_flags_0 = io_empty_0 ? io_enq_bits_fflags_bits_flags_0 : out_fflags_bits_flags; // @[util.scala:448:7, :506:17, :510:27, :515:21, :517:19] wire [7:0] _io_deq_bits_uop_br_mask_T_2 = ~io_brupdate_b1_resolve_mask_0; // @[util.scala:85:27, :89:23, :448:7] wire [7:0] _io_deq_bits_uop_br_mask_T_3 = io_enq_bits_uop_br_mask_0 & _io_deq_bits_uop_br_mask_T_2; // @[util.scala:85:{25,27}, :448:7] assign io_deq_bits_uop_br_mask_0 = io_empty_0 ? _io_deq_bits_uop_br_mask_T_3 : _io_deq_bits_uop_br_mask_T_1; // @[util.scala:85:25, :448:7, :511:27, :515:21, :518:31] assign do_deq = ~io_empty_0 & _do_deq_T_3; // @[util.scala:448:7, :476:{24,66}, :510:27, :515:21, :517:19, :520:14] assign do_enq = ~(io_empty_0 & io_deq_ready_0) & _do_enq_T; // @[Decoupled.scala:51:35] wire [3:0] _ptr_diff_T = _GEN_1 - _GEN_2; // @[Counter.scala:77:24] wire [2:0] ptr_diff = _ptr_diff_T[2:0]; // @[util.scala:524:40] wire [2:0] _io_count_T = {3{maybe_full}}; // @[util.scala:470:27, :530:24] wire _io_count_T_1 = deq_ptr_value > enq_ptr_value; // @[Counter.scala:61:40] wire [3:0] _io_count_T_2 = {1'h0, ptr_diff} + 4'h7; // @[util.scala:524:40, :533:40] wire [2:0] _io_count_T_3 = _io_count_T_2[2:0]; // @[util.scala:533:40] wire [2:0] _io_count_T_4 = _io_count_T_1 ? 
_io_count_T_3 : ptr_diff; // @[util.scala:524:40, :532:{24,39}, :533:40] assign _io_count_T_5 = ptr_match ? _io_count_T : _io_count_T_4; // @[util.scala:472:33, :529:20, :530:24, :532:24] assign io_count = _io_count_T_5; // @[util.scala:448:7, :529:20] wire _GEN_82 = enq_ptr_value == 3'h0; // @[Counter.scala:61:40] wire _GEN_83 = do_enq & _GEN_82; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_84 = enq_ptr_value == 3'h1; // @[Counter.scala:61:40] wire _GEN_85 = do_enq & _GEN_84; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_86 = enq_ptr_value == 3'h2; // @[Counter.scala:61:40] wire _GEN_87 = do_enq & _GEN_86; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_88 = enq_ptr_value == 3'h3; // @[Counter.scala:61:40] wire _GEN_89 = do_enq & _GEN_88; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_90 = enq_ptr_value == 3'h4; // @[Counter.scala:61:40] wire _GEN_91 = do_enq & _GEN_90; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_92 = enq_ptr_value == 3'h5; // @[Counter.scala:61:40] wire _GEN_93 = do_enq & _GEN_92; // @[util.scala:475:24, :481:16, :487:17, :489:33] wire _GEN_94 = do_enq & wrap; // @[Counter.scala:73:24] always @(posedge clock) begin // @[util.scala:448:7] if (reset) begin // @[util.scala:448:7] valids_0 <= 1'h0; // @[util.scala:465:24] valids_1 <= 1'h0; // @[util.scala:465:24] valids_2 <= 1'h0; // @[util.scala:465:24] valids_3 <= 1'h0; // @[util.scala:465:24] valids_4 <= 1'h0; // @[util.scala:465:24] valids_5 <= 1'h0; // @[util.scala:465:24] valids_6 <= 1'h0; // @[util.scala:465:24] enq_ptr_value <= 3'h0; // @[Counter.scala:61:40] deq_ptr_value <= 3'h0; // @[Counter.scala:61:40] maybe_full <= 1'h0; // @[util.scala:470:27] end else begin // @[util.scala:448:7] valids_0 <= ~(do_deq & deq_ptr_value == 3'h0) & (_GEN_83 | _valids_0_T_6); // @[Counter.scala:61:40] valids_1 <= ~(do_deq & deq_ptr_value == 3'h1) & (_GEN_85 | _valids_1_T_6); // @[Counter.scala:61:40] valids_2 <= ~(do_deq & deq_ptr_value == 3'h2) & (_GEN_87 | _valids_2_T_6); // @[Counter.scala:61:40] valids_3 <= ~(do_deq & deq_ptr_value == 3'h3) & (_GEN_89 | _valids_3_T_6); // @[Counter.scala:61:40] valids_4 <= ~(do_deq & deq_ptr_value == 3'h4) & (_GEN_91 | _valids_4_T_6); // @[Counter.scala:61:40] valids_5 <= ~(do_deq & deq_ptr_value == 3'h5) & (_GEN_93 | _valids_5_T_6); // @[Counter.scala:61:40] valids_6 <= ~(do_deq & wrap_1) & (_GEN_94 | _valids_6_T_6); // @[Counter.scala:73:24] if (do_enq) // @[util.scala:475:24] enq_ptr_value <= wrap ? 3'h0 : _value_T_1; // @[Counter.scala:61:40, :73:24, :77:{15,24}, :87:{20,28}] if (do_deq) // @[util.scala:476:24] deq_ptr_value <= wrap_1 ? 
3'h0 : _value_T_3; // @[Counter.scala:61:40, :73:24, :77:{15,24}, :87:{20,28}] if (~(do_enq == do_deq)) // @[util.scala:470:27, :475:24, :476:24, :500:{16,28}, :501:16] maybe_full <= do_enq; // @[util.scala:470:27, :475:24] end if (_GEN_83) begin // @[util.scala:481:16, :487:17, :489:33] uops_0_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_0_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_0_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_0_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_0_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_0_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_0_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_0_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_0_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_0_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_0_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_0_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_0_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_0_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_0_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_0_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_0_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_0_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_0_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_0_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_0_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_0_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_0_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_0_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_0_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_0_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_0_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_0_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_0_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_0_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_0_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_0_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_0_prs2_busy <= 
io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_0_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_0_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_0_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_0_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_0_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_0_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_0_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_0_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_0_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_0_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_0_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_0_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_0_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_0_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_0_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_0_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_0_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_0_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_0_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_0_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_0_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_0_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_0_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_0_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_0_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_0_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_0_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_0_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_0_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_0_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_0_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_0_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_0_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_0_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_0_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_0_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_82) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_0_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_0) // @[util.scala:465:24] uops_0_br_mask <= _uops_0_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_85) begin // @[util.scala:481:16, :487:17, :489:33] uops_1_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_1_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_1_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_1_is_rvc <= 
io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_1_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_1_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_1_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_1_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_1_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_1_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_1_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_1_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_1_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_1_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_1_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_1_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_1_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_1_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_1_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_1_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_1_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_1_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_1_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_1_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_1_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_1_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_1_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_1_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_1_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_1_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_1_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_1_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_1_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_1_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_1_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_1_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_1_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_1_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_1_bypassable <= io_enq_bits_uop_bypassable_0; // 
@[util.scala:448:7, :466:20] uops_1_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_1_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_1_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_1_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_1_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_1_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_1_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_1_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_1_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_1_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_1_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_1_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_1_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_1_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_1_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_1_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_1_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_1_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_1_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_1_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_1_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_1_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_1_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_1_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_1_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_1_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_1_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_1_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_1_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_1_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_84) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_1_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_1) // @[util.scala:465:24] uops_1_br_mask <= _uops_1_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_87) begin // @[util.scala:481:16, :487:17, :489:33] uops_2_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_2_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_2_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_2_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_2_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_2_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_2_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, 
:466:20] uops_2_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_2_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_2_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_2_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_2_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_2_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_2_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_2_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_2_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_2_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_2_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_2_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_2_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_2_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_2_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_2_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_2_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_2_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_2_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_2_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_2_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_2_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_2_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_2_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_2_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_2_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_2_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_2_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_2_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_2_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_2_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_2_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_2_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_2_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_2_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_2_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_2_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_2_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_2_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_2_uses_ldq <= io_enq_bits_uop_uses_ldq_0; 
// @[util.scala:448:7, :466:20] uops_2_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_2_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_2_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_2_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_2_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_2_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_2_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_2_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_2_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_2_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_2_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_2_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_2_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_2_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_2_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_2_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_2_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_2_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_2_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_2_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_2_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_2_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_2_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_86) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_2_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_2) // @[util.scala:465:24] uops_2_br_mask <= _uops_2_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_89) begin // @[util.scala:481:16, :487:17, :489:33] uops_3_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_3_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_3_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_3_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_3_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_3_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_3_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_3_ctrl_is_std <= 
io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_3_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_3_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_3_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_3_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_3_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_3_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_3_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_3_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_3_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_3_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_3_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_3_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_3_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_3_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_3_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_3_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_3_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_3_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_3_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_3_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_3_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_3_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_3_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_3_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_3_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_3_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_3_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_3_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_3_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_3_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_3_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_3_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_3_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_3_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_3_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_3_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_3_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_3_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_3_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_3_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_3_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_3_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_3_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_3_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_3_lrs1 <= 
io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_3_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_3_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_3_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_3_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_3_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_3_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_3_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_3_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_3_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_3_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_3_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_3_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_3_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_3_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_3_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_3_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_88) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_3_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_3) // @[util.scala:465:24] uops_3_br_mask <= _uops_3_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_91) begin // @[util.scala:481:16, :487:17, :489:33] uops_4_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_4_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_4_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_4_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_4_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_4_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_4_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_4_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_4_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_4_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_4_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_4_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_4_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_4_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] 
uops_4_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_4_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_4_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_4_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_4_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_4_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_4_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_4_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_4_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_4_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_4_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_4_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_4_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_4_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_4_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_4_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_4_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_4_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_4_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_4_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_4_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_4_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_4_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_4_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_4_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_4_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_4_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_4_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_4_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_4_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_4_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_4_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_4_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_4_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_4_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_4_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_4_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_4_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_4_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_4_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_4_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_4_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_4_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_4_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_4_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_4_frs3_en <= 
io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_4_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_4_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_4_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_4_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_4_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_4_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_4_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_4_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_4_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_90) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_4_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_4) // @[util.scala:465:24] uops_4_br_mask <= _uops_4_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_93) begin // @[util.scala:481:16, :487:17, :489:33] uops_5_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_5_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_5_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_5_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_5_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_5_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_5_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_5_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_5_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_5_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_5_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_5_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_5_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_5_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_5_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_5_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_5_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_5_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_5_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_5_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_5_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] 
uops_5_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_5_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_5_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_5_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_5_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_5_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_5_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_5_prs2 <= io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_5_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_5_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_5_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_5_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_5_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_5_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_5_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_5_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_5_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_5_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_5_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_5_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_5_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_5_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_5_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_5_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_5_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_5_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_5_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_5_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_5_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_5_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_5_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_5_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_5_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_5_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_5_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_5_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_5_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_5_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_5_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_5_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_5_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_5_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_5_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_5_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_5_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] 
uops_5_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_5_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_5_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & _GEN_92) // @[util.scala:475:24, :482:22, :487:17, :489:33, :491:33] uops_5_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_5) // @[util.scala:465:24] uops_5_br_mask <= _uops_5_br_mask_T_1; // @[util.scala:89:21, :466:20] if (_GEN_94) begin // @[util.scala:481:16, :487:17, :489:33] uops_6_uopc <= io_enq_bits_uop_uopc_0; // @[util.scala:448:7, :466:20] uops_6_inst <= io_enq_bits_uop_inst_0; // @[util.scala:448:7, :466:20] uops_6_debug_inst <= io_enq_bits_uop_debug_inst_0; // @[util.scala:448:7, :466:20] uops_6_is_rvc <= io_enq_bits_uop_is_rvc_0; // @[util.scala:448:7, :466:20] uops_6_debug_pc <= io_enq_bits_uop_debug_pc_0; // @[util.scala:448:7, :466:20] uops_6_iq_type <= io_enq_bits_uop_iq_type_0; // @[util.scala:448:7, :466:20] uops_6_fu_code <= io_enq_bits_uop_fu_code_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_br_type <= io_enq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_op1_sel <= io_enq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_op2_sel <= io_enq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_imm_sel <= io_enq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_op_fcn <= io_enq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_fcn_dw <= io_enq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_csr_cmd <= io_enq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_is_load <= io_enq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_is_sta <= io_enq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7, :466:20] uops_6_ctrl_is_std <= io_enq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7, :466:20] uops_6_iw_state <= io_enq_bits_uop_iw_state_0; // @[util.scala:448:7, :466:20] uops_6_iw_p1_poisoned <= io_enq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7, :466:20] uops_6_iw_p2_poisoned <= io_enq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7, :466:20] uops_6_is_br <= io_enq_bits_uop_is_br_0; // @[util.scala:448:7, :466:20] uops_6_is_jalr <= io_enq_bits_uop_is_jalr_0; // @[util.scala:448:7, :466:20] uops_6_is_jal <= io_enq_bits_uop_is_jal_0; // @[util.scala:448:7, :466:20] uops_6_is_sfb <= io_enq_bits_uop_is_sfb_0; // @[util.scala:448:7, :466:20] uops_6_br_tag <= io_enq_bits_uop_br_tag_0; // @[util.scala:448:7, :466:20] uops_6_ftq_idx <= io_enq_bits_uop_ftq_idx_0; // @[util.scala:448:7, :466:20] uops_6_edge_inst <= io_enq_bits_uop_edge_inst_0; // @[util.scala:448:7, :466:20] uops_6_pc_lob <= io_enq_bits_uop_pc_lob_0; // @[util.scala:448:7, :466:20] uops_6_taken <= io_enq_bits_uop_taken_0; // @[util.scala:448:7, :466:20] uops_6_imm_packed <= io_enq_bits_uop_imm_packed_0; // @[util.scala:448:7, :466:20] uops_6_csr_addr <= io_enq_bits_uop_csr_addr_0; // @[util.scala:448:7, :466:20] uops_6_rob_idx <= io_enq_bits_uop_rob_idx_0; // @[util.scala:448:7, :466:20] uops_6_ldq_idx <= io_enq_bits_uop_ldq_idx_0; // @[util.scala:448:7, :466:20] uops_6_stq_idx <= io_enq_bits_uop_stq_idx_0; // @[util.scala:448:7, :466:20] uops_6_rxq_idx <= io_enq_bits_uop_rxq_idx_0; // @[util.scala:448:7, :466:20] uops_6_pdst <= io_enq_bits_uop_pdst_0; // @[util.scala:448:7, :466:20] uops_6_prs1 <= io_enq_bits_uop_prs1_0; // @[util.scala:448:7, :466:20] uops_6_prs2 <= 
io_enq_bits_uop_prs2_0; // @[util.scala:448:7, :466:20] uops_6_prs3 <= io_enq_bits_uop_prs3_0; // @[util.scala:448:7, :466:20] uops_6_ppred <= io_enq_bits_uop_ppred_0; // @[util.scala:448:7, :466:20] uops_6_prs1_busy <= io_enq_bits_uop_prs1_busy_0; // @[util.scala:448:7, :466:20] uops_6_prs2_busy <= io_enq_bits_uop_prs2_busy_0; // @[util.scala:448:7, :466:20] uops_6_prs3_busy <= io_enq_bits_uop_prs3_busy_0; // @[util.scala:448:7, :466:20] uops_6_ppred_busy <= io_enq_bits_uop_ppred_busy_0; // @[util.scala:448:7, :466:20] uops_6_stale_pdst <= io_enq_bits_uop_stale_pdst_0; // @[util.scala:448:7, :466:20] uops_6_exception <= io_enq_bits_uop_exception_0; // @[util.scala:448:7, :466:20] uops_6_exc_cause <= io_enq_bits_uop_exc_cause_0; // @[util.scala:448:7, :466:20] uops_6_bypassable <= io_enq_bits_uop_bypassable_0; // @[util.scala:448:7, :466:20] uops_6_mem_cmd <= io_enq_bits_uop_mem_cmd_0; // @[util.scala:448:7, :466:20] uops_6_mem_size <= io_enq_bits_uop_mem_size_0; // @[util.scala:448:7, :466:20] uops_6_mem_signed <= io_enq_bits_uop_mem_signed_0; // @[util.scala:448:7, :466:20] uops_6_is_fence <= io_enq_bits_uop_is_fence_0; // @[util.scala:448:7, :466:20] uops_6_is_fencei <= io_enq_bits_uop_is_fencei_0; // @[util.scala:448:7, :466:20] uops_6_is_amo <= io_enq_bits_uop_is_amo_0; // @[util.scala:448:7, :466:20] uops_6_uses_ldq <= io_enq_bits_uop_uses_ldq_0; // @[util.scala:448:7, :466:20] uops_6_uses_stq <= io_enq_bits_uop_uses_stq_0; // @[util.scala:448:7, :466:20] uops_6_is_sys_pc2epc <= io_enq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7, :466:20] uops_6_is_unique <= io_enq_bits_uop_is_unique_0; // @[util.scala:448:7, :466:20] uops_6_flush_on_commit <= io_enq_bits_uop_flush_on_commit_0; // @[util.scala:448:7, :466:20] uops_6_ldst_is_rs1 <= io_enq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7, :466:20] uops_6_ldst <= io_enq_bits_uop_ldst_0; // @[util.scala:448:7, :466:20] uops_6_lrs1 <= io_enq_bits_uop_lrs1_0; // @[util.scala:448:7, :466:20] uops_6_lrs2 <= io_enq_bits_uop_lrs2_0; // @[util.scala:448:7, :466:20] uops_6_lrs3 <= io_enq_bits_uop_lrs3_0; // @[util.scala:448:7, :466:20] uops_6_ldst_val <= io_enq_bits_uop_ldst_val_0; // @[util.scala:448:7, :466:20] uops_6_dst_rtype <= io_enq_bits_uop_dst_rtype_0; // @[util.scala:448:7, :466:20] uops_6_lrs1_rtype <= io_enq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7, :466:20] uops_6_lrs2_rtype <= io_enq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7, :466:20] uops_6_frs3_en <= io_enq_bits_uop_frs3_en_0; // @[util.scala:448:7, :466:20] uops_6_fp_val <= io_enq_bits_uop_fp_val_0; // @[util.scala:448:7, :466:20] uops_6_fp_single <= io_enq_bits_uop_fp_single_0; // @[util.scala:448:7, :466:20] uops_6_xcpt_pf_if <= io_enq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7, :466:20] uops_6_xcpt_ae_if <= io_enq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7, :466:20] uops_6_xcpt_ma_if <= io_enq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7, :466:20] uops_6_bp_debug_if <= io_enq_bits_uop_bp_debug_if_0; // @[util.scala:448:7, :466:20] uops_6_bp_xcpt_if <= io_enq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7, :466:20] uops_6_debug_fsrc <= io_enq_bits_uop_debug_fsrc_0; // @[util.scala:448:7, :466:20] uops_6_debug_tsrc <= io_enq_bits_uop_debug_tsrc_0; // @[util.scala:448:7, :466:20] end if (do_enq & wrap) // @[Counter.scala:73:24] uops_6_br_mask <= _uops_br_mask_T_1; // @[util.scala:85:25, :466:20] else if (valids_6) // @[util.scala:465:24] uops_6_br_mask <= _uops_6_br_mask_T_1; // @[util.scala:89:21, :466:20] always @(posedge) ram_7x461 ram_ext ( // @[util.scala:464:20] 
.R0_addr (deq_ptr_value), // @[Counter.scala:61:40] .R0_en (1'h1), .R0_clk (clock), .R0_data (_ram_ext_R0_data), .W0_addr (enq_ptr_value), // @[Counter.scala:61:40] .W0_en (do_enq), // @[util.scala:475:24] .W0_clk (clock), .W0_data ({io_enq_bits_fflags_bits_flags_0, io_enq_bits_fflags_bits_uop_debug_tsrc_0, io_enq_bits_fflags_bits_uop_debug_fsrc_0, io_enq_bits_fflags_bits_uop_bp_xcpt_if_0, io_enq_bits_fflags_bits_uop_bp_debug_if_0, io_enq_bits_fflags_bits_uop_xcpt_ma_if_0, io_enq_bits_fflags_bits_uop_xcpt_ae_if_0, io_enq_bits_fflags_bits_uop_xcpt_pf_if_0, io_enq_bits_fflags_bits_uop_fp_single_0, io_enq_bits_fflags_bits_uop_fp_val_0, io_enq_bits_fflags_bits_uop_frs3_en_0, io_enq_bits_fflags_bits_uop_lrs2_rtype_0, io_enq_bits_fflags_bits_uop_lrs1_rtype_0, io_enq_bits_fflags_bits_uop_dst_rtype_0, io_enq_bits_fflags_bits_uop_ldst_val_0, io_enq_bits_fflags_bits_uop_lrs3_0, io_enq_bits_fflags_bits_uop_lrs2_0, io_enq_bits_fflags_bits_uop_lrs1_0, io_enq_bits_fflags_bits_uop_ldst_0, io_enq_bits_fflags_bits_uop_ldst_is_rs1_0, io_enq_bits_fflags_bits_uop_flush_on_commit_0, io_enq_bits_fflags_bits_uop_is_unique_0, io_enq_bits_fflags_bits_uop_is_sys_pc2epc_0, io_enq_bits_fflags_bits_uop_uses_stq_0, io_enq_bits_fflags_bits_uop_uses_ldq_0, io_enq_bits_fflags_bits_uop_is_amo_0, io_enq_bits_fflags_bits_uop_is_fencei_0, io_enq_bits_fflags_bits_uop_is_fence_0, io_enq_bits_fflags_bits_uop_mem_signed_0, io_enq_bits_fflags_bits_uop_mem_size_0, io_enq_bits_fflags_bits_uop_mem_cmd_0, io_enq_bits_fflags_bits_uop_bypassable_0, io_enq_bits_fflags_bits_uop_exc_cause_0, io_enq_bits_fflags_bits_uop_exception_0, io_enq_bits_fflags_bits_uop_stale_pdst_0, io_enq_bits_fflags_bits_uop_ppred_busy_0, io_enq_bits_fflags_bits_uop_prs3_busy_0, io_enq_bits_fflags_bits_uop_prs2_busy_0, io_enq_bits_fflags_bits_uop_prs1_busy_0, io_enq_bits_fflags_bits_uop_ppred_0, io_enq_bits_fflags_bits_uop_prs3_0, io_enq_bits_fflags_bits_uop_prs2_0, io_enq_bits_fflags_bits_uop_prs1_0, io_enq_bits_fflags_bits_uop_pdst_0, io_enq_bits_fflags_bits_uop_rxq_idx_0, io_enq_bits_fflags_bits_uop_stq_idx_0, io_enq_bits_fflags_bits_uop_ldq_idx_0, io_enq_bits_fflags_bits_uop_rob_idx_0, io_enq_bits_fflags_bits_uop_csr_addr_0, io_enq_bits_fflags_bits_uop_imm_packed_0, io_enq_bits_fflags_bits_uop_taken_0, io_enq_bits_fflags_bits_uop_pc_lob_0, io_enq_bits_fflags_bits_uop_edge_inst_0, io_enq_bits_fflags_bits_uop_ftq_idx_0, io_enq_bits_fflags_bits_uop_br_tag_0, io_enq_bits_fflags_bits_uop_br_mask_0, io_enq_bits_fflags_bits_uop_is_sfb_0, io_enq_bits_fflags_bits_uop_is_jal_0, io_enq_bits_fflags_bits_uop_is_jalr_0, io_enq_bits_fflags_bits_uop_is_br_0, io_enq_bits_fflags_bits_uop_iw_p2_poisoned_0, io_enq_bits_fflags_bits_uop_iw_p1_poisoned_0, io_enq_bits_fflags_bits_uop_iw_state_0, io_enq_bits_fflags_bits_uop_ctrl_is_std_0, io_enq_bits_fflags_bits_uop_ctrl_is_sta_0, io_enq_bits_fflags_bits_uop_ctrl_is_load_0, io_enq_bits_fflags_bits_uop_ctrl_csr_cmd_0, io_enq_bits_fflags_bits_uop_ctrl_fcn_dw_0, io_enq_bits_fflags_bits_uop_ctrl_op_fcn_0, io_enq_bits_fflags_bits_uop_ctrl_imm_sel_0, io_enq_bits_fflags_bits_uop_ctrl_op2_sel_0, io_enq_bits_fflags_bits_uop_ctrl_op1_sel_0, io_enq_bits_fflags_bits_uop_ctrl_br_type_0, io_enq_bits_fflags_bits_uop_fu_code_0, io_enq_bits_fflags_bits_uop_iq_type_0, io_enq_bits_fflags_bits_uop_debug_pc_0, io_enq_bits_fflags_bits_uop_is_rvc_0, io_enq_bits_fflags_bits_uop_debug_inst_0, io_enq_bits_fflags_bits_uop_inst_0, io_enq_bits_fflags_bits_uop_uopc_0, io_enq_bits_fflags_valid_0, 1'h0, io_enq_bits_data_0}) // @[util.scala:448:7, :464:20] ); // 
@[util.scala:464:20] assign io_enq_ready = io_enq_ready_0; // @[util.scala:448:7] assign io_deq_valid = io_deq_valid_0; // @[util.scala:448:7] assign io_deq_bits_uop_uopc = io_deq_bits_uop_uopc_0; // @[util.scala:448:7] assign io_deq_bits_uop_inst = io_deq_bits_uop_inst_0; // @[util.scala:448:7] assign io_deq_bits_uop_debug_inst = io_deq_bits_uop_debug_inst_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_rvc = io_deq_bits_uop_is_rvc_0; // @[util.scala:448:7] assign io_deq_bits_uop_debug_pc = io_deq_bits_uop_debug_pc_0; // @[util.scala:448:7] assign io_deq_bits_uop_iq_type = io_deq_bits_uop_iq_type_0; // @[util.scala:448:7] assign io_deq_bits_uop_fu_code = io_deq_bits_uop_fu_code_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_br_type = io_deq_bits_uop_ctrl_br_type_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_op1_sel = io_deq_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_op2_sel = io_deq_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_imm_sel = io_deq_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_op_fcn = io_deq_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_fcn_dw = io_deq_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_csr_cmd = io_deq_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_is_load = io_deq_bits_uop_ctrl_is_load_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_is_sta = io_deq_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7] assign io_deq_bits_uop_ctrl_is_std = io_deq_bits_uop_ctrl_is_std_0; // @[util.scala:448:7] assign io_deq_bits_uop_iw_state = io_deq_bits_uop_iw_state_0; // @[util.scala:448:7] assign io_deq_bits_uop_iw_p1_poisoned = io_deq_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7] assign io_deq_bits_uop_iw_p2_poisoned = io_deq_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_br = io_deq_bits_uop_is_br_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_jalr = io_deq_bits_uop_is_jalr_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_jal = io_deq_bits_uop_is_jal_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_sfb = io_deq_bits_uop_is_sfb_0; // @[util.scala:448:7] assign io_deq_bits_uop_br_mask = io_deq_bits_uop_br_mask_0; // @[util.scala:448:7] assign io_deq_bits_uop_br_tag = io_deq_bits_uop_br_tag_0; // @[util.scala:448:7] assign io_deq_bits_uop_ftq_idx = io_deq_bits_uop_ftq_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_edge_inst = io_deq_bits_uop_edge_inst_0; // @[util.scala:448:7] assign io_deq_bits_uop_pc_lob = io_deq_bits_uop_pc_lob_0; // @[util.scala:448:7] assign io_deq_bits_uop_taken = io_deq_bits_uop_taken_0; // @[util.scala:448:7] assign io_deq_bits_uop_imm_packed = io_deq_bits_uop_imm_packed_0; // @[util.scala:448:7] assign io_deq_bits_uop_csr_addr = io_deq_bits_uop_csr_addr_0; // @[util.scala:448:7] assign io_deq_bits_uop_rob_idx = io_deq_bits_uop_rob_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_ldq_idx = io_deq_bits_uop_ldq_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_stq_idx = io_deq_bits_uop_stq_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_rxq_idx = io_deq_bits_uop_rxq_idx_0; // @[util.scala:448:7] assign io_deq_bits_uop_pdst = io_deq_bits_uop_pdst_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs1 = io_deq_bits_uop_prs1_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs2 = io_deq_bits_uop_prs2_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs3 = io_deq_bits_uop_prs3_0; // @[util.scala:448:7] 
assign io_deq_bits_uop_ppred = io_deq_bits_uop_ppred_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs1_busy = io_deq_bits_uop_prs1_busy_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs2_busy = io_deq_bits_uop_prs2_busy_0; // @[util.scala:448:7] assign io_deq_bits_uop_prs3_busy = io_deq_bits_uop_prs3_busy_0; // @[util.scala:448:7] assign io_deq_bits_uop_ppred_busy = io_deq_bits_uop_ppred_busy_0; // @[util.scala:448:7] assign io_deq_bits_uop_stale_pdst = io_deq_bits_uop_stale_pdst_0; // @[util.scala:448:7] assign io_deq_bits_uop_exception = io_deq_bits_uop_exception_0; // @[util.scala:448:7] assign io_deq_bits_uop_exc_cause = io_deq_bits_uop_exc_cause_0; // @[util.scala:448:7] assign io_deq_bits_uop_bypassable = io_deq_bits_uop_bypassable_0; // @[util.scala:448:7] assign io_deq_bits_uop_mem_cmd = io_deq_bits_uop_mem_cmd_0; // @[util.scala:448:7] assign io_deq_bits_uop_mem_size = io_deq_bits_uop_mem_size_0; // @[util.scala:448:7] assign io_deq_bits_uop_mem_signed = io_deq_bits_uop_mem_signed_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_fence = io_deq_bits_uop_is_fence_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_fencei = io_deq_bits_uop_is_fencei_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_amo = io_deq_bits_uop_is_amo_0; // @[util.scala:448:7] assign io_deq_bits_uop_uses_ldq = io_deq_bits_uop_uses_ldq_0; // @[util.scala:448:7] assign io_deq_bits_uop_uses_stq = io_deq_bits_uop_uses_stq_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_sys_pc2epc = io_deq_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7] assign io_deq_bits_uop_is_unique = io_deq_bits_uop_is_unique_0; // @[util.scala:448:7] assign io_deq_bits_uop_flush_on_commit = io_deq_bits_uop_flush_on_commit_0; // @[util.scala:448:7] assign io_deq_bits_uop_ldst_is_rs1 = io_deq_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7] assign io_deq_bits_uop_ldst = io_deq_bits_uop_ldst_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs1 = io_deq_bits_uop_lrs1_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs2 = io_deq_bits_uop_lrs2_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs3 = io_deq_bits_uop_lrs3_0; // @[util.scala:448:7] assign io_deq_bits_uop_ldst_val = io_deq_bits_uop_ldst_val_0; // @[util.scala:448:7] assign io_deq_bits_uop_dst_rtype = io_deq_bits_uop_dst_rtype_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs1_rtype = io_deq_bits_uop_lrs1_rtype_0; // @[util.scala:448:7] assign io_deq_bits_uop_lrs2_rtype = io_deq_bits_uop_lrs2_rtype_0; // @[util.scala:448:7] assign io_deq_bits_uop_frs3_en = io_deq_bits_uop_frs3_en_0; // @[util.scala:448:7] assign io_deq_bits_uop_fp_val = io_deq_bits_uop_fp_val_0; // @[util.scala:448:7] assign io_deq_bits_uop_fp_single = io_deq_bits_uop_fp_single_0; // @[util.scala:448:7] assign io_deq_bits_uop_xcpt_pf_if = io_deq_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_xcpt_ae_if = io_deq_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_xcpt_ma_if = io_deq_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_bp_debug_if = io_deq_bits_uop_bp_debug_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_bp_xcpt_if = io_deq_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7] assign io_deq_bits_uop_debug_fsrc = io_deq_bits_uop_debug_fsrc_0; // @[util.scala:448:7] assign io_deq_bits_uop_debug_tsrc = io_deq_bits_uop_debug_tsrc_0; // @[util.scala:448:7] assign io_deq_bits_data = io_deq_bits_data_0; // @[util.scala:448:7] assign io_deq_bits_predicated = io_deq_bits_predicated_0; // @[util.scala:448:7] assign io_deq_bits_fflags_valid = 
  io_deq_bits_fflags_valid_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_uopc = io_deq_bits_fflags_bits_uop_uopc_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_inst = io_deq_bits_fflags_bits_uop_inst_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_debug_inst = io_deq_bits_fflags_bits_uop_debug_inst_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_rvc = io_deq_bits_fflags_bits_uop_is_rvc_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_debug_pc = io_deq_bits_fflags_bits_uop_debug_pc_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_iq_type = io_deq_bits_fflags_bits_uop_iq_type_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_fu_code = io_deq_bits_fflags_bits_uop_fu_code_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_br_type = io_deq_bits_fflags_bits_uop_ctrl_br_type_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_op1_sel = io_deq_bits_fflags_bits_uop_ctrl_op1_sel_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_op2_sel = io_deq_bits_fflags_bits_uop_ctrl_op2_sel_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_imm_sel = io_deq_bits_fflags_bits_uop_ctrl_imm_sel_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_op_fcn = io_deq_bits_fflags_bits_uop_ctrl_op_fcn_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_fcn_dw = io_deq_bits_fflags_bits_uop_ctrl_fcn_dw_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_csr_cmd = io_deq_bits_fflags_bits_uop_ctrl_csr_cmd_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_is_load = io_deq_bits_fflags_bits_uop_ctrl_is_load_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_is_sta = io_deq_bits_fflags_bits_uop_ctrl_is_sta_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ctrl_is_std = io_deq_bits_fflags_bits_uop_ctrl_is_std_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_iw_state = io_deq_bits_fflags_bits_uop_iw_state_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_iw_p1_poisoned = io_deq_bits_fflags_bits_uop_iw_p1_poisoned_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_iw_p2_poisoned = io_deq_bits_fflags_bits_uop_iw_p2_poisoned_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_br = io_deq_bits_fflags_bits_uop_is_br_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_jalr = io_deq_bits_fflags_bits_uop_is_jalr_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_jal = io_deq_bits_fflags_bits_uop_is_jal_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_sfb = io_deq_bits_fflags_bits_uop_is_sfb_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_br_mask = io_deq_bits_fflags_bits_uop_br_mask_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_br_tag = io_deq_bits_fflags_bits_uop_br_tag_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ftq_idx = io_deq_bits_fflags_bits_uop_ftq_idx_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_edge_inst = io_deq_bits_fflags_bits_uop_edge_inst_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_pc_lob = io_deq_bits_fflags_bits_uop_pc_lob_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_taken = io_deq_bits_fflags_bits_uop_taken_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_imm_packed = io_deq_bits_fflags_bits_uop_imm_packed_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_csr_addr = io_deq_bits_fflags_bits_uop_csr_addr_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_rob_idx = io_deq_bits_fflags_bits_uop_rob_idx_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ldq_idx = io_deq_bits_fflags_bits_uop_ldq_idx_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_stq_idx = io_deq_bits_fflags_bits_uop_stq_idx_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_rxq_idx = io_deq_bits_fflags_bits_uop_rxq_idx_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_pdst = io_deq_bits_fflags_bits_uop_pdst_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_prs1 = io_deq_bits_fflags_bits_uop_prs1_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_prs2 = io_deq_bits_fflags_bits_uop_prs2_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_prs3 = io_deq_bits_fflags_bits_uop_prs3_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ppred = io_deq_bits_fflags_bits_uop_ppred_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_prs1_busy = io_deq_bits_fflags_bits_uop_prs1_busy_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_prs2_busy = io_deq_bits_fflags_bits_uop_prs2_busy_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_prs3_busy = io_deq_bits_fflags_bits_uop_prs3_busy_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ppred_busy = io_deq_bits_fflags_bits_uop_ppred_busy_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_stale_pdst = io_deq_bits_fflags_bits_uop_stale_pdst_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_exception = io_deq_bits_fflags_bits_uop_exception_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_exc_cause = io_deq_bits_fflags_bits_uop_exc_cause_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_bypassable = io_deq_bits_fflags_bits_uop_bypassable_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_mem_cmd = io_deq_bits_fflags_bits_uop_mem_cmd_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_mem_size = io_deq_bits_fflags_bits_uop_mem_size_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_mem_signed = io_deq_bits_fflags_bits_uop_mem_signed_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_fence = io_deq_bits_fflags_bits_uop_is_fence_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_fencei = io_deq_bits_fflags_bits_uop_is_fencei_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_amo = io_deq_bits_fflags_bits_uop_is_amo_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_uses_ldq = io_deq_bits_fflags_bits_uop_uses_ldq_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_uses_stq = io_deq_bits_fflags_bits_uop_uses_stq_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_sys_pc2epc = io_deq_bits_fflags_bits_uop_is_sys_pc2epc_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_is_unique = io_deq_bits_fflags_bits_uop_is_unique_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_flush_on_commit = io_deq_bits_fflags_bits_uop_flush_on_commit_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ldst_is_rs1 = io_deq_bits_fflags_bits_uop_ldst_is_rs1_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ldst = io_deq_bits_fflags_bits_uop_ldst_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_lrs1 = io_deq_bits_fflags_bits_uop_lrs1_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_lrs2 = io_deq_bits_fflags_bits_uop_lrs2_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_lrs3 = io_deq_bits_fflags_bits_uop_lrs3_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_ldst_val = io_deq_bits_fflags_bits_uop_ldst_val_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_dst_rtype = io_deq_bits_fflags_bits_uop_dst_rtype_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_lrs1_rtype = io_deq_bits_fflags_bits_uop_lrs1_rtype_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_lrs2_rtype = io_deq_bits_fflags_bits_uop_lrs2_rtype_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_frs3_en = io_deq_bits_fflags_bits_uop_frs3_en_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_fp_val = io_deq_bits_fflags_bits_uop_fp_val_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_fp_single = io_deq_bits_fflags_bits_uop_fp_single_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_xcpt_pf_if = io_deq_bits_fflags_bits_uop_xcpt_pf_if_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_xcpt_ae_if = io_deq_bits_fflags_bits_uop_xcpt_ae_if_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_xcpt_ma_if = io_deq_bits_fflags_bits_uop_xcpt_ma_if_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_bp_debug_if = io_deq_bits_fflags_bits_uop_bp_debug_if_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_bp_xcpt_if = io_deq_bits_fflags_bits_uop_bp_xcpt_if_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_debug_fsrc = io_deq_bits_fflags_bits_uop_debug_fsrc_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_uop_debug_tsrc = io_deq_bits_fflags_bits_uop_debug_tsrc_0; // @[util.scala:448:7]
  assign io_deq_bits_fflags_bits_flags = io_deq_bits_fflags_bits_flags_0; // @[util.scala:448:7]
  assign io_empty = io_empty_0; // @[util.scala:448:7]
endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in")
  }

  // HeterogeneousBag moved to standalone diplomacy
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts)
  @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0")
  val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag
}
File Parameters.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.diplomacy

import chisel3._
import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor}

import freechips.rocketchip.util.ShiftQueue

/** Options for describing the attributes of memory regions */
object RegionType {
  // Define the 'more relaxed than' ordering
  val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS)
  sealed trait T extends Ordered[T] {
    def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this)
  }

  case object CACHED extends T // an intermediate agent may have cached a copy of the region for you
  case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided
  case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible
  case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached
  case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects
  case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed
  case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively
}

// A non-empty half-open range; [start, end)
case class IdRange(start: Int, end: Int) extends Ordered[IdRange] {
  require (start >= 0, s"Ids cannot be negative, but got: $start.")
  require (start <= end, "Id ranges cannot be negative.")

  def compare(x: IdRange) = {
    val primary = (this.start - x.start).signum
    val secondary = (x.end - this.end).signum
    if (primary != 0) primary else secondary
  }

  def overlaps(x: IdRange) = start < x.end && x.start < end
  def contains(x: IdRange) = start <= x.start && x.end <= end

  def contains(x: Int) = start <= x && x < end
  def contains(x: UInt) =
    if (size == 0) {
      false.B
    } else if (size == 1) { // simple comparison
      x === start.U
    } else {
      // find index of largest different bit
      val largestDeltaBit = log2Floor(start ^ (end-1))
      val smallestCommonBit = largestDeltaBit + 1 // may not exist in x
      val uncommonMask = (1 << smallestCommonBit) - 1
      val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0)
      // the prefix must match exactly (note: may shift ALL bits away)
      (x >> smallestCommonBit) === (start >> smallestCommonBit).U &&
      // firrtl constant prop range analysis can eliminate these two:
      (start & uncommonMask).U <= uncommonBits &&
      uncommonBits <= ((end-1) & uncommonMask).U
    }

  def shift(x: Int) = IdRange(start+x, end+x)
  def size = end - start
  def isEmpty = end == start

  def range = start until end
}

object IdRange {
  def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else {
    val ranges = s.sorted
    (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b }
  }
}

// A potentially empty inclusive range of 2-powers [min, max] (in bytes)
case class TransferSizes(min: Int, max: Int) {
  def this(x: Int) = this(x, x)

  require (min <= max,
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial
        // Do there exist A messages without Data?
        val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None
      }
      case _:TLBundleB => {
        // Do there exist B messages with Data?
        val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial
        // Do there exist B messages without Data?
        val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint
        // Statically optimize the case where hasData is a constant
        if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None
      }
      case _:TLBundleC => {
        // Do there exist C messages with Data?
        val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe
        // Do there exist C messages without Data?
        val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe
        if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None
      }
      case _:TLBundleD => {
        // Do there exist D messages with Data?
        val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB
        // Do there exist D messages without Data?
        val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT
        if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None
      }
      case _:TLBundleE => Some(false)
    }
  }

  def isRequest(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => true.B
      case b: TLBundleB => true.B
      case c: TLBundleC => c.opcode(2) && c.opcode(1)
        // opcode === TLMessages.Release ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(2) && !d.opcode(1)
        // opcode === TLMessages.Grant ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
  }

  def isResponse(x: TLChannel): Bool = {
    x match {
      case a: TLBundleA => false.B
      case b: TLBundleB => false.B
      case c: TLBundleC => !c.opcode(2) || !c.opcode(1)
        // opcode =/= TLMessages.Release &&
        // opcode =/= TLMessages.ReleaseData
      case d: TLBundleD => true.B // Grant isResponse + isRequest
      case e: TLBundleE => true.B
    }
  }

  def hasData(x: TLChannel): Bool = {
    val opdata = x match {
      case a: TLBundleA => !a.opcode(2)
        // opcode === TLMessages.PutFullData ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case b: TLBundleB => !b.opcode(2)
        // opcode === TLMessages.PutFullData ||
        // opcode === TLMessages.PutPartialData ||
        // opcode === TLMessages.ArithmeticData ||
        // opcode === TLMessages.LogicalData
      case c: TLBundleC => c.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.ProbeAckData ||
        // opcode === TLMessages.ReleaseData
      case d: TLBundleD => d.opcode(0)
        // opcode === TLMessages.AccessAckData ||
        // opcode === TLMessages.GrantData
      case e: TLBundleE => false.B
    }
    staticHasData(x).map(_.B).getOrElse(opdata)
  }

  def opcode(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.opcode
      case b: TLBundleB => b.opcode
      case c: TLBundleC => c.opcode
      case d: TLBundleD => d.opcode
    }
  }

  def param(x: TLDataChannel): UInt = {
    x match {
      case a: TLBundleA => a.param
      case b: TLBundleB => b.param
      case c: TLBundleC => c.param
      case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
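// Rough usage sketch for the channel-A helpers above (illustrative only, not part of the
// upstream file; `node`, `reqAddr` and `wantRead` are hypothetical names, with `tl`/`edge`
// obtained from a TileLink client node inside its LazyModuleImp):
//   val (tl, edge) = node.out(0)
//   val (getLegal, getBits) = edge.Get(fromSource = 0.U, toAddress = reqAddr, lgSize = 3.U)
//   tl.a.valid := wantRead && getLegal
//   tl.a.bits  := getBits
//   tl.d.ready := true.B
//   val (d_first, d_last, d_done) = edge.firstlast(tl.d) // per-beat bookkeeping for burst responses
//   when (tl.d.fire && d_last) { /* the response has been fully received */ }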
module TLMonitor_23( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [5:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [26:0] _GEN = {23'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [8:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [3:0] size; // @[Monitor.scala:389:22] reg [3:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] reg [8:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg [3:0] source_1; // @[Monitor.scala:541:22] reg [5:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [15:0] inflight; // @[Monitor.scala:614:27] reg [63:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [127:0] inflight_sizes; // @[Monitor.scala:618:33] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire [15:0] _GEN_0 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire _GEN_1 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_2 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [15:0] _GEN_3 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [15:0] inflight_1; // @[Monitor.scala:726:35] reg [127:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File AMOALU.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters class StoreGen(typ: UInt, addr: UInt, dat: UInt, maxSize: Int) { val size = Wire(UInt(log2Up(log2Up(maxSize)+1).W)) size := typ val dat_padded = dat.pad(maxSize*8) def misaligned: Bool = (addr & ((1.U << size) - 1.U)(log2Up(maxSize)-1,0)).orR def mask = { var res = 1.U for (i <- 0 until log2Up(maxSize)) { val upper = Mux(addr(i), res, 0.U) | Mux(size >= (i+1).U, ((BigInt(1) << (1 << i))-1).U, 0.U) val lower = Mux(addr(i), 0.U, res) res = Cat(upper, lower) } res } protected def genData(i: Int): UInt = if (i >= log2Up(maxSize)) dat_padded else Mux(size === i.U, Fill(1 << (log2Up(maxSize)-i), dat_padded((8 << i)-1,0)), genData(i+1)) def data = genData(0) def wordData = genData(2) } class LoadGen(typ: UInt, signed: Bool, addr: UInt, dat: UInt, zero: Bool, maxSize: Int) { private val size = new StoreGen(typ, addr, dat, maxSize).size private def genData(logMinSize: Int): UInt = { var res = dat for (i <- log2Up(maxSize)-1 to logMinSize by -1) { val pos = 8 << i val shifted = Mux(addr(i), res(2*pos-1,pos), res(pos-1,0)) val doZero = (i == 0).B && zero val zeroed = Mux(doZero, 0.U, shifted) res = Cat(Mux(size === i.U || doZero, Fill(8*maxSize-pos, signed && zeroed(pos-1)), res(8*maxSize-1,pos)), zeroed) } res } def wordData = genData(2) def data = genData(0) } class AMOALU(operandBits: Int)(implicit p: Parameters) extends Module { val minXLen = 32 val widths = (0 to log2Ceil(operandBits / minXLen)).map(minXLen << _) val io = IO(new Bundle { val mask = Input(UInt((operandBits / 8).W)) val cmd = Input(UInt(M_SZ.W)) val lhs = Input(UInt(operandBits.W)) val rhs = Input(UInt(operandBits.W)) val out = Output(UInt(operandBits.W)) val out_unmasked = Output(UInt(operandBits.W)) }) val max = io.cmd === M_XA_MAX || io.cmd === M_XA_MAXU val min = io.cmd === M_XA_MIN || io.cmd === M_XA_MINU val add = io.cmd === M_XA_ADD val logic_and = io.cmd === M_XA_OR || io.cmd === M_XA_AND val logic_xor = io.cmd === M_XA_XOR || io.cmd === M_XA_OR val adder_out = { // partition the carry chain to support sub-xLen addition val mask = ~(0.U(operandBits.W) +: widths.init.map(w => !io.mask(w/8-1) << (w-1))).reduce(_|_) (io.lhs & mask) + (io.rhs & mask) } val less = { // break up the comparator so the lower parts will be CSE'd def isLessUnsigned(x: UInt, y: UInt, n: Int): Bool = { if (n == minXLen) x(n-1, 0) < y(n-1, 0) else x(n-1, n/2) < y(n-1, n/2) || x(n-1, n/2) === y(n-1, n/2) && isLessUnsigned(x, y, n/2) } def isLess(x: UInt, y: UInt, n: Int): Bool = { val signed = { val mask = M_XA_MIN ^ M_XA_MINU (io.cmd & mask) === (M_XA_MIN & mask) } Mux(x(n-1) === y(n-1), isLessUnsigned(x, y, n), Mux(signed, x(n-1), y(n-1))) } PriorityMux(widths.reverse.map(w => (io.mask(w/8/2), isLess(io.lhs, io.rhs, w)))) } val minmax = Mux(Mux(less, min, max), io.lhs, io.rhs) val logic = Mux(logic_and, io.lhs & io.rhs, 0.U) | Mux(logic_xor, io.lhs ^ io.rhs, 0.U) val out = Mux(add, adder_out, Mux(logic_and || logic_xor, logic, minmax)) val wmask = FillInterleaved(8, io.mask) io.out := wmask & out | ~wmask & io.lhs io.out_unmasked := out }
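// Minimal instantiation sketch (illustrative only, not part of the upstream file; it
// assumes an implicit Parameters is in scope and `oldData`/`amoOperand` are hypothetical
// signals of the right widths):
//   val amoalu = Module(new AMOALU(operandBits = 32))
//   amoalu.io.mask := "b1111".U   // operate on all four byte lanes
//   amoalu.io.cmd  := M_XA_ADD    // one of the M_XA_* commands decoded above
//   amoalu.io.lhs  := oldData     // value currently held in memory
//   amoalu.io.rhs  := amoOperand  // operand supplied by the AMO instruction
//   val writeback = amoalu.io.out // byte lanes outside io.mask pass io.lhs through unchanged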
module AMOALU( // @[AMOALU.scala:54:7] input clock, // @[AMOALU.scala:54:7] input reset, // @[AMOALU.scala:54:7] input [3:0] io_mask, // @[AMOALU.scala:58:14] input [4:0] io_cmd, // @[AMOALU.scala:58:14] input [31:0] io_lhs, // @[AMOALU.scala:58:14] input [31:0] io_rhs, // @[AMOALU.scala:58:14] output [31:0] io_out_unmasked // @[AMOALU.scala:58:14] ); wire [3:0] io_mask_0 = io_mask; // @[AMOALU.scala:54:7] wire [4:0] io_cmd_0 = io_cmd; // @[AMOALU.scala:54:7] wire [31:0] io_lhs_0 = io_lhs; // @[AMOALU.scala:54:7] wire [31:0] io_rhs_0 = io_rhs; // @[AMOALU.scala:54:7] wire [31:0] adder_out_mask = 32'hFFFFFFFF; // @[AMOALU.scala:75:16] wire [3:0] less_signed_mask = 4'h2; // @[AMOALU.scala:88:29] wire [3:0] _less_signed_T_1 = 4'h0; // @[AMOALU.scala:89:39] wire [31:0] _adder_out_T = io_lhs_0; // @[AMOALU.scala:54:7, :76:13] wire [31:0] _less_T_4 = io_lhs_0; // @[AMOALU.scala:54:7, :82:26] wire [31:0] _adder_out_T_1 = io_rhs_0; // @[AMOALU.scala:54:7, :76:31] wire [31:0] _less_T_5 = io_rhs_0; // @[AMOALU.scala:54:7, :82:38] wire [31:0] _io_out_T_3; // @[AMOALU.scala:107:25] wire [31:0] out; // @[AMOALU.scala:102:8] wire [31:0] io_out; // @[AMOALU.scala:54:7] wire [31:0] io_out_unmasked_0; // @[AMOALU.scala:54:7] wire _max_T = io_cmd_0 == 5'hD; // @[AMOALU.scala:54:7, :67:20] wire _max_T_1 = io_cmd_0 == 5'hF; // @[AMOALU.scala:54:7, :67:43] wire max = _max_T | _max_T_1; // @[AMOALU.scala:67:{20,33,43}] wire _min_T = io_cmd_0 == 5'hC; // @[AMOALU.scala:54:7, :68:20] wire _min_T_1 = io_cmd_0 == 5'hE; // @[AMOALU.scala:54:7, :68:43] wire min = _min_T | _min_T_1; // @[AMOALU.scala:68:{20,33,43}] wire add = io_cmd_0 == 5'h8; // @[AMOALU.scala:54:7, :69:20] wire _GEN = io_cmd_0 == 5'hA; // @[AMOALU.scala:54:7, :70:26] wire _logic_and_T; // @[AMOALU.scala:70:26] assign _logic_and_T = _GEN; // @[AMOALU.scala:70:26] wire _logic_xor_T_1; // @[AMOALU.scala:71:49] assign _logic_xor_T_1 = _GEN; // @[AMOALU.scala:70:26, :71:49] wire _logic_and_T_1 = io_cmd_0 == 5'hB; // @[AMOALU.scala:54:7, :70:48] wire logic_and = _logic_and_T | _logic_and_T_1; // @[AMOALU.scala:70:{26,38,48}] wire _logic_xor_T = io_cmd_0 == 5'h9; // @[AMOALU.scala:54:7, :71:26] wire logic_xor = _logic_xor_T | _logic_xor_T_1; // @[AMOALU.scala:71:{26,39,49}] wire [32:0] _adder_out_T_2 = {1'h0, _adder_out_T} + {1'h0, _adder_out_T_1}; // @[AMOALU.scala:76:{13,21,31}] wire [31:0] adder_out = _adder_out_T_2[31:0]; // @[AMOALU.scala:76:21] wire _less_T = io_mask_0[2]; // @[AMOALU.scala:54:7, :94:49] wire _wmask_T_2 = io_mask_0[2]; // @[AMOALU.scala:54:7, :94:49, :106:30] wire [4:0] _less_signed_T = {3'h0, io_cmd_0[1], 1'h0}; // @[AMOALU.scala:54:7, :89:17] wire less_signed = _less_signed_T == 5'h0; // @[AMOALU.scala:89:{17,25}] wire _less_T_1 = io_lhs_0[31]; // @[AMOALU.scala:54:7, :91:12] wire _less_T_7 = io_lhs_0[31]; // @[AMOALU.scala:54:7, :91:{12,68}] wire _less_T_2 = io_rhs_0[31]; // @[AMOALU.scala:54:7, :91:23] wire _less_T_8 = io_rhs_0[31]; // @[AMOALU.scala:54:7, :91:{23,76}] wire _less_T_3 = _less_T_1 == _less_T_2; // @[AMOALU.scala:91:{12,18,23}] wire _less_T_6 = _less_T_4 < _less_T_5; // @[AMOALU.scala:82:{26,35,38}] wire _less_T_9 = less_signed ? _less_T_7 : _less_T_8; // @[AMOALU.scala:89:25, :91:{58,68,76}] wire less = _less_T_3 ? _less_T_6 : _less_T_9; // @[AMOALU.scala:82:35, :91:{10,18,58}] wire _minmax_T = less ? min : max; // @[AMOALU.scala:67:33, :68:33, :91:10, :97:23] wire [31:0] minmax = _minmax_T ? 
io_lhs_0 : io_rhs_0; // @[AMOALU.scala:54:7, :97:{19,23}] wire [31:0] _logic_T = io_lhs_0 & io_rhs_0; // @[AMOALU.scala:54:7, :99:27] wire [31:0] _logic_T_1 = logic_and ? _logic_T : 32'h0; // @[AMOALU.scala:70:38, :99:{8,27}] wire [31:0] _logic_T_2 = io_lhs_0 ^ io_rhs_0; // @[AMOALU.scala:54:7, :100:27] wire [31:0] _logic_T_3 = logic_xor ? _logic_T_2 : 32'h0; // @[AMOALU.scala:71:39, :100:{8,27}] wire [31:0] logic_0 = _logic_T_1 | _logic_T_3; // @[AMOALU.scala:99:{8,42}, :100:8] wire _out_T = logic_and | logic_xor; // @[AMOALU.scala:70:38, :71:39, :103:19] wire [31:0] _out_T_1 = _out_T ? logic_0 : minmax; // @[AMOALU.scala:97:19, :99:42, :103:{8,19}] assign out = add ? adder_out : _out_T_1; // @[AMOALU.scala:69:20, :76:21, :102:8, :103:8] assign io_out_unmasked_0 = out; // @[AMOALU.scala:54:7, :102:8] wire _wmask_T = io_mask_0[0]; // @[AMOALU.scala:54:7, :106:30] wire _wmask_T_1 = io_mask_0[1]; // @[AMOALU.scala:54:7, :106:30] wire _wmask_T_3 = io_mask_0[3]; // @[AMOALU.scala:54:7, :106:30] wire [7:0] _wmask_T_4 = {8{_wmask_T}}; // @[AMOALU.scala:106:30] wire [7:0] _wmask_T_5 = {8{_wmask_T_1}}; // @[AMOALU.scala:106:30] wire [7:0] _wmask_T_6 = {8{_wmask_T_2}}; // @[AMOALU.scala:106:30] wire [7:0] _wmask_T_7 = {8{_wmask_T_3}}; // @[AMOALU.scala:106:30] wire [15:0] wmask_lo = {_wmask_T_5, _wmask_T_4}; // @[AMOALU.scala:106:30] wire [15:0] wmask_hi = {_wmask_T_7, _wmask_T_6}; // @[AMOALU.scala:106:30] wire [31:0] wmask = {wmask_hi, wmask_lo}; // @[AMOALU.scala:106:30] wire [31:0] _io_out_T = wmask & out; // @[AMOALU.scala:102:8, :106:30, :107:19] wire [31:0] _io_out_T_1 = ~wmask; // @[AMOALU.scala:106:30, :107:27] wire [31:0] _io_out_T_2 = _io_out_T_1 & io_lhs_0; // @[AMOALU.scala:54:7, :107:{27,34}] assign _io_out_T_3 = _io_out_T | _io_out_T_2; // @[AMOALU.scala:107:{19,25,34}] assign io_out = _io_out_T_3; // @[AMOALU.scala:54:7, :107:25] assign io_out_unmasked = io_out_unmasked_0; // @[AMOALU.scala:54:7] endmodule
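The module above is the operandBits = 32 elaboration: with a single 32-bit lane, widths collapses to Seq(32) and the sub-xLen carry-chain mask degenerates to all-ones (the adder_out_mask = 32'hFFFFFFFF wire). A minimal elaboration sketch for reproducing such output, assuming the circt.stage.ChiselStage driver and an empty cde Parameters instance (AMOALU takes the implicit p but does not read any keys from it); the driver object name EmitAMOALU is hypothetical and not part of the files above:

import circt.stage.ChiselStage
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket.AMOALU

object EmitAMOALU extends App {
  // AMOALU only requires the implicit Parameters; it does not look up any keys.
  implicit val p: Parameters = Parameters.empty
  // operandBits = 32 gives widths = Seq(32): one comparator lane, full-width carry chain.
  println(ChiselStage.emitSystemVerilog(new AMOALU(32)))
}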
Generate the Verilog code corresponding to the following Chisel files. File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. 
val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. 
* * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. 
It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, 
DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. 
*/ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). 
[[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... // Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. 
*/ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. 
|$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. */ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. 
*/ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. */ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File UserYanker.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.amba.axi4 import chisel3._ import chisel3.util.{Queue, QueueIO, UIntToOH} import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.lazymodule.{LazyModule, LazyModuleImp} import freechips.rocketchip.util.BundleMap /** This adapter prunes all user bit fields of the echo type from request messages, * storing them in queues and echoing them back when matching response messages are received. * * It also optionally rate limits the number of transactions that can be in flight simultaneously * per FIFO domain / A[W|R]ID. * * @param capMaxFlight is an optional maximum number of transactions that can be in flight per A[W|R]ID. */ class AXI4UserYanker(capMaxFlight: Option[Int] = None)(implicit p: Parameters) extends LazyModule { val node = AXI4AdapterNode( masterFn = { mp => mp.copy( masters = mp.masters.map { m => m.copy( maxFlight = (m.maxFlight, capMaxFlight) match { case (Some(x), Some(y)) => Some(x min y) case (Some(x), None) => Some(x) case (None, Some(y)) => Some(y) case (None, None) => None })}, echoFields = Nil)}, slaveFn = { sp => sp }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => // Which fields are we stripping? 
val echoFields = edgeIn.master.echoFields val need_bypass = edgeOut.slave.minLatency < 1 edgeOut.master.masters.foreach { m => require (m.maxFlight.isDefined, "UserYanker needs a flight cap on each ID") } def queue(id: Int) = { val depth = edgeOut.master.masters.find(_.id.contains(id)).flatMap(_.maxFlight).getOrElse(0) if (depth == 0) { Wire(new QueueIO(BundleMap(echoFields), 1)) // unused ID => undefined value } else { Module(new Queue(BundleMap(echoFields), depth, flow=need_bypass)).io } } val rqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) } val wqueues = Seq.tabulate(edgeIn.master.endId) { i => queue(i) } val arid = in.ar.bits.id val ar_ready = VecInit(rqueues.map(_.enq.ready))(arid) in .ar.ready := out.ar.ready && ar_ready out.ar.valid := in .ar.valid && ar_ready Connectable.waiveUnmatched(out.ar.bits, in.ar.bits) match { case (lhs, rhs) => lhs :<= rhs } val rid = out.r.bits.id val r_valid = VecInit(rqueues.map(_.deq.valid))(rid) val r_bits = VecInit(rqueues.map(_.deq.bits))(rid) assert (!out.r.valid || r_valid) // Q must be ready faster than the response Connectable.waiveUnmatched(in.r, out.r) match { case (lhs, rhs) => lhs :<>= rhs } in.r.bits.echo :<= r_bits val arsel = UIntToOH(arid, edgeIn.master.endId).asBools val rsel = UIntToOH(rid, edgeIn.master.endId).asBools (rqueues zip (arsel zip rsel)) foreach { case (q, (ar, r)) => q.deq.ready := out.r .valid && in .r .ready && r && out.r.bits.last q.deq.valid := DontCare q.deq.bits := DontCare q.enq.valid := in .ar.valid && out.ar.ready && ar q.enq.ready := DontCare q.enq.bits :<>= in.ar.bits.echo q.count := DontCare } val awid = in.aw.bits.id val aw_ready = VecInit(wqueues.map(_.enq.ready))(awid) in .aw.ready := out.aw.ready && aw_ready out.aw.valid := in .aw.valid && aw_ready Connectable.waiveUnmatched(out.aw.bits, in.aw.bits) match { case (lhs, rhs) => lhs :<>= rhs } val bid = out.b.bits.id val b_valid = VecInit(wqueues.map(_.deq.valid))(bid) val b_bits = VecInit(wqueues.map(_.deq.bits))(bid) assert (!out.b.valid || b_valid) // Q must be ready faster than the response Connectable.waiveUnmatched(in.b, out.b) match { case (lhs, rhs) => lhs :<>= rhs } in.b.bits.echo :<>= b_bits val awsel = UIntToOH(awid, edgeIn.master.endId).asBools val bsel = UIntToOH(bid, edgeIn.master.endId).asBools (wqueues zip (awsel zip bsel)) foreach { case (q, (aw, b)) => q.deq.ready := out.b .valid && in .b .ready && b q.deq.valid := DontCare q.deq.bits := DontCare q.enq.valid := in .aw.valid && out.aw.ready && aw q.enq.ready := DontCare q.enq.bits :<>= in.aw.bits.echo q.count := DontCare } out.w :<>= in.w } } } object AXI4UserYanker { def apply(capMaxFlight: Option[Int] = None)(implicit p: Parameters): AXI4Node = { val axi4yank = LazyModule(new AXI4UserYanker(capMaxFlight)) axi4yank.node } }
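Before the generated module below, a note on the mechanism: the yanker strips the echo-typed user fields from AR/AW, parks them in one queue per ID (depth equal to the negotiated maxFlight), and splices them back onto the matching R/B responses, gating request ready on queue space so the flight cap also acts as back-pressure. A minimal single-ID sketch of that echo-queue idea, illustrative only and not part of the files above (the EchoSide name, the 8-bit echo payload, and the fixed depth are assumptions):

import chisel3._
import chisel3.util._

// Single-ID simplification of the per-ID echo queues in AXI4UserYanker: the
// request's echo bits are captured when the request is accepted downstream and
// re-attached, in order, when the corresponding response is handed back upstream.
class EchoSide(depth: Int = 4) extends Module {
  val io = IO(new Bundle {
    val req_fire  = Input(Bool())     // request accepted downstream (valid && ready)
    val req_echo  = Input(UInt(8.W))  // echo bits stripped from the request
    val resp_fire = Input(Bool())     // matching response returned upstream
    val resp_echo = Output(UInt(8.W)) // echo bits restored onto the response
    val can_issue = Output(Bool())    // the real adapter ANDs this into ar/aw ready
  })
  val q = Module(new Queue(UInt(8.W), depth))
  q.io.enq.valid := io.req_fire
  q.io.enq.bits  := io.req_echo
  q.io.deq.ready := io.resp_fire
  io.resp_echo   := q.io.deq.bits
  io.can_issue   := q.io.enq.ready    // flight cap: stall new requests when full
}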
module AXI4UserYanker_1( // @[UserYanker.scala:36:9] input clock, // @[UserYanker.scala:36:9] input reset, // @[UserYanker.scala:36:9] output auto_in_aw_ready, // @[LazyModuleImp.scala:107:25] input auto_in_aw_valid, // @[LazyModuleImp.scala:107:25] input [6:0] auto_in_aw_bits_id, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_aw_bits_addr, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_aw_bits_len, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_aw_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_aw_bits_burst, // @[LazyModuleImp.scala:107:25] input auto_in_aw_bits_lock, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_cache, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_aw_bits_prot, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_qos, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_aw_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_aw_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25] input auto_in_aw_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] output auto_in_w_ready, // @[LazyModuleImp.scala:107:25] input auto_in_w_valid, // @[LazyModuleImp.scala:107:25] input [63:0] auto_in_w_bits_data, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_w_bits_strb, // @[LazyModuleImp.scala:107:25] input auto_in_w_bits_last, // @[LazyModuleImp.scala:107:25] input auto_in_b_ready, // @[LazyModuleImp.scala:107:25] output auto_in_b_valid, // @[LazyModuleImp.scala:107:25] output [6:0] auto_in_b_bits_id, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_b_bits_resp, // @[LazyModuleImp.scala:107:25] output [3:0] auto_in_b_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_in_b_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25] output auto_in_b_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] output auto_in_ar_ready, // @[LazyModuleImp.scala:107:25] input auto_in_ar_valid, // @[LazyModuleImp.scala:107:25] input [6:0] auto_in_ar_bits_id, // @[LazyModuleImp.scala:107:25] input [31:0] auto_in_ar_bits_addr, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_ar_bits_len, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_ar_bits_size, // @[LazyModuleImp.scala:107:25] input [1:0] auto_in_ar_bits_burst, // @[LazyModuleImp.scala:107:25] input auto_in_ar_bits_lock, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_cache, // @[LazyModuleImp.scala:107:25] input [2:0] auto_in_ar_bits_prot, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_qos, // @[LazyModuleImp.scala:107:25] input [3:0] auto_in_ar_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25] input [7:0] auto_in_ar_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25] input auto_in_ar_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] input auto_in_r_ready, // @[LazyModuleImp.scala:107:25] output auto_in_r_valid, // @[LazyModuleImp.scala:107:25] output [6:0] auto_in_r_bits_id, // @[LazyModuleImp.scala:107:25] output [63:0] auto_in_r_bits_data, // @[LazyModuleImp.scala:107:25] output [1:0] auto_in_r_bits_resp, // @[LazyModuleImp.scala:107:25] output [3:0] auto_in_r_bits_echo_tl_state_size, // @[LazyModuleImp.scala:107:25] output [7:0] auto_in_r_bits_echo_tl_state_source, // @[LazyModuleImp.scala:107:25] output auto_in_r_bits_echo_extra_id, // @[LazyModuleImp.scala:107:25] output auto_in_r_bits_last, // @[LazyModuleImp.scala:107:25] input auto_out_aw_ready, // @[LazyModuleImp.scala:107:25] output auto_out_aw_valid, // @[LazyModuleImp.scala:107:25] output [6:0] 
auto_out_aw_bits_id, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_aw_bits_addr, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_aw_bits_len, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_aw_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_out_aw_bits_burst, // @[LazyModuleImp.scala:107:25] output auto_out_aw_bits_lock, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_aw_bits_cache, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_aw_bits_prot, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_aw_bits_qos, // @[LazyModuleImp.scala:107:25] input auto_out_w_ready, // @[LazyModuleImp.scala:107:25] output auto_out_w_valid, // @[LazyModuleImp.scala:107:25] output [63:0] auto_out_w_bits_data, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_w_bits_strb, // @[LazyModuleImp.scala:107:25] output auto_out_w_bits_last, // @[LazyModuleImp.scala:107:25] output auto_out_b_ready, // @[LazyModuleImp.scala:107:25] input auto_out_b_valid, // @[LazyModuleImp.scala:107:25] input [6:0] auto_out_b_bits_id, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_b_bits_resp, // @[LazyModuleImp.scala:107:25] input auto_out_ar_ready, // @[LazyModuleImp.scala:107:25] output auto_out_ar_valid, // @[LazyModuleImp.scala:107:25] output [6:0] auto_out_ar_bits_id, // @[LazyModuleImp.scala:107:25] output [31:0] auto_out_ar_bits_addr, // @[LazyModuleImp.scala:107:25] output [7:0] auto_out_ar_bits_len, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_ar_bits_size, // @[LazyModuleImp.scala:107:25] output [1:0] auto_out_ar_bits_burst, // @[LazyModuleImp.scala:107:25] output auto_out_ar_bits_lock, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_ar_bits_cache, // @[LazyModuleImp.scala:107:25] output [2:0] auto_out_ar_bits_prot, // @[LazyModuleImp.scala:107:25] output [3:0] auto_out_ar_bits_qos, // @[LazyModuleImp.scala:107:25] output auto_out_r_ready, // @[LazyModuleImp.scala:107:25] input auto_out_r_valid, // @[LazyModuleImp.scala:107:25] input [6:0] auto_out_r_bits_id, // @[LazyModuleImp.scala:107:25] input [63:0] auto_out_r_bits_data, // @[LazyModuleImp.scala:107:25] input [1:0] auto_out_r_bits_resp, // @[LazyModuleImp.scala:107:25] input auto_out_r_bits_last // @[LazyModuleImp.scala:107:25] ); wire auto_in_aw_valid_0 = auto_in_aw_valid; // @[UserYanker.scala:36:9] wire [6:0] auto_in_aw_bits_id_0 = auto_in_aw_bits_id; // @[UserYanker.scala:36:9] wire [31:0] auto_in_aw_bits_addr_0 = auto_in_aw_bits_addr; // @[UserYanker.scala:36:9] wire [7:0] auto_in_aw_bits_len_0 = auto_in_aw_bits_len; // @[UserYanker.scala:36:9] wire [2:0] auto_in_aw_bits_size_0 = auto_in_aw_bits_size; // @[UserYanker.scala:36:9] wire [1:0] auto_in_aw_bits_burst_0 = auto_in_aw_bits_burst; // @[UserYanker.scala:36:9] wire auto_in_aw_bits_lock_0 = auto_in_aw_bits_lock; // @[UserYanker.scala:36:9] wire [3:0] auto_in_aw_bits_cache_0 = auto_in_aw_bits_cache; // @[UserYanker.scala:36:9] wire [2:0] auto_in_aw_bits_prot_0 = auto_in_aw_bits_prot; // @[UserYanker.scala:36:9] wire [3:0] auto_in_aw_bits_qos_0 = auto_in_aw_bits_qos; // @[UserYanker.scala:36:9] wire [3:0] auto_in_aw_bits_echo_tl_state_size_0 = auto_in_aw_bits_echo_tl_state_size; // @[UserYanker.scala:36:9] wire [7:0] auto_in_aw_bits_echo_tl_state_source_0 = auto_in_aw_bits_echo_tl_state_source; // @[UserYanker.scala:36:9] wire auto_in_aw_bits_echo_extra_id_0 = auto_in_aw_bits_echo_extra_id; // @[UserYanker.scala:36:9] wire auto_in_w_valid_0 = auto_in_w_valid; // @[UserYanker.scala:36:9] wire [63:0] auto_in_w_bits_data_0 = 
auto_in_w_bits_data; // @[UserYanker.scala:36:9] wire [7:0] auto_in_w_bits_strb_0 = auto_in_w_bits_strb; // @[UserYanker.scala:36:9] wire auto_in_w_bits_last_0 = auto_in_w_bits_last; // @[UserYanker.scala:36:9] wire auto_in_b_ready_0 = auto_in_b_ready; // @[UserYanker.scala:36:9] wire auto_in_ar_valid_0 = auto_in_ar_valid; // @[UserYanker.scala:36:9] wire [6:0] auto_in_ar_bits_id_0 = auto_in_ar_bits_id; // @[UserYanker.scala:36:9] wire [31:0] auto_in_ar_bits_addr_0 = auto_in_ar_bits_addr; // @[UserYanker.scala:36:9] wire [7:0] auto_in_ar_bits_len_0 = auto_in_ar_bits_len; // @[UserYanker.scala:36:9] wire [2:0] auto_in_ar_bits_size_0 = auto_in_ar_bits_size; // @[UserYanker.scala:36:9] wire [1:0] auto_in_ar_bits_burst_0 = auto_in_ar_bits_burst; // @[UserYanker.scala:36:9] wire auto_in_ar_bits_lock_0 = auto_in_ar_bits_lock; // @[UserYanker.scala:36:9] wire [3:0] auto_in_ar_bits_cache_0 = auto_in_ar_bits_cache; // @[UserYanker.scala:36:9] wire [2:0] auto_in_ar_bits_prot_0 = auto_in_ar_bits_prot; // @[UserYanker.scala:36:9] wire [3:0] auto_in_ar_bits_qos_0 = auto_in_ar_bits_qos; // @[UserYanker.scala:36:9] wire [3:0] auto_in_ar_bits_echo_tl_state_size_0 = auto_in_ar_bits_echo_tl_state_size; // @[UserYanker.scala:36:9] wire [7:0] auto_in_ar_bits_echo_tl_state_source_0 = auto_in_ar_bits_echo_tl_state_source; // @[UserYanker.scala:36:9] wire auto_in_ar_bits_echo_extra_id_0 = auto_in_ar_bits_echo_extra_id; // @[UserYanker.scala:36:9] wire auto_in_r_ready_0 = auto_in_r_ready; // @[UserYanker.scala:36:9] wire auto_out_aw_ready_0 = auto_out_aw_ready; // @[UserYanker.scala:36:9] wire auto_out_w_ready_0 = auto_out_w_ready; // @[UserYanker.scala:36:9] wire auto_out_b_valid_0 = auto_out_b_valid; // @[UserYanker.scala:36:9] wire [6:0] auto_out_b_bits_id_0 = auto_out_b_bits_id; // @[UserYanker.scala:36:9] wire [1:0] auto_out_b_bits_resp_0 = auto_out_b_bits_resp; // @[UserYanker.scala:36:9] wire auto_out_ar_ready_0 = auto_out_ar_ready; // @[UserYanker.scala:36:9] wire auto_out_r_valid_0 = auto_out_r_valid; // @[UserYanker.scala:36:9] wire [6:0] auto_out_r_bits_id_0 = auto_out_r_bits_id; // @[UserYanker.scala:36:9] wire [63:0] auto_out_r_bits_data_0 = auto_out_r_bits_data; // @[UserYanker.scala:36:9] wire [1:0] auto_out_r_bits_resp_0 = auto_out_r_bits_resp; // @[UserYanker.scala:36:9] wire auto_out_r_bits_last_0 = auto_out_r_bits_last; // @[UserYanker.scala:36:9] wire nodeIn_aw_ready; // @[MixedNode.scala:551:17] wire nodeIn_aw_valid = auto_in_aw_valid_0; // @[UserYanker.scala:36:9] wire [6:0] nodeIn_aw_bits_id = auto_in_aw_bits_id_0; // @[UserYanker.scala:36:9] wire [31:0] nodeIn_aw_bits_addr = auto_in_aw_bits_addr_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_aw_bits_len = auto_in_aw_bits_len_0; // @[UserYanker.scala:36:9] wire [2:0] nodeIn_aw_bits_size = auto_in_aw_bits_size_0; // @[UserYanker.scala:36:9] wire [1:0] nodeIn_aw_bits_burst = auto_in_aw_bits_burst_0; // @[UserYanker.scala:36:9] wire nodeIn_aw_bits_lock = auto_in_aw_bits_lock_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_aw_bits_cache = auto_in_aw_bits_cache_0; // @[UserYanker.scala:36:9] wire [2:0] nodeIn_aw_bits_prot = auto_in_aw_bits_prot_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_aw_bits_qos = auto_in_aw_bits_qos_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_aw_bits_echo_tl_state_size = auto_in_aw_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_aw_bits_echo_tl_state_source = auto_in_aw_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9] wire nodeIn_aw_bits_echo_extra_id = 
auto_in_aw_bits_echo_extra_id_0; // @[UserYanker.scala:36:9] wire nodeIn_w_ready; // @[MixedNode.scala:551:17] wire nodeIn_w_valid = auto_in_w_valid_0; // @[UserYanker.scala:36:9] wire [63:0] nodeIn_w_bits_data = auto_in_w_bits_data_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_w_bits_strb = auto_in_w_bits_strb_0; // @[UserYanker.scala:36:9] wire nodeIn_w_bits_last = auto_in_w_bits_last_0; // @[UserYanker.scala:36:9] wire nodeIn_b_ready = auto_in_b_ready_0; // @[UserYanker.scala:36:9] wire nodeIn_b_valid; // @[MixedNode.scala:551:17] wire [6:0] nodeIn_b_bits_id; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_b_bits_resp; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_b_bits_echo_tl_state_size; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_b_bits_echo_tl_state_source; // @[MixedNode.scala:551:17] wire nodeIn_b_bits_echo_extra_id; // @[MixedNode.scala:551:17] wire nodeIn_ar_ready; // @[MixedNode.scala:551:17] wire nodeIn_ar_valid = auto_in_ar_valid_0; // @[UserYanker.scala:36:9] wire [6:0] nodeIn_ar_bits_id = auto_in_ar_bits_id_0; // @[UserYanker.scala:36:9] wire [31:0] nodeIn_ar_bits_addr = auto_in_ar_bits_addr_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_ar_bits_len = auto_in_ar_bits_len_0; // @[UserYanker.scala:36:9] wire [2:0] nodeIn_ar_bits_size = auto_in_ar_bits_size_0; // @[UserYanker.scala:36:9] wire [1:0] nodeIn_ar_bits_burst = auto_in_ar_bits_burst_0; // @[UserYanker.scala:36:9] wire nodeIn_ar_bits_lock = auto_in_ar_bits_lock_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_ar_bits_cache = auto_in_ar_bits_cache_0; // @[UserYanker.scala:36:9] wire [2:0] nodeIn_ar_bits_prot = auto_in_ar_bits_prot_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_ar_bits_qos = auto_in_ar_bits_qos_0; // @[UserYanker.scala:36:9] wire [3:0] nodeIn_ar_bits_echo_tl_state_size = auto_in_ar_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9] wire [7:0] nodeIn_ar_bits_echo_tl_state_source = auto_in_ar_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9] wire nodeIn_ar_bits_echo_extra_id = auto_in_ar_bits_echo_extra_id_0; // @[UserYanker.scala:36:9] wire nodeIn_r_ready = auto_in_r_ready_0; // @[UserYanker.scala:36:9] wire nodeIn_r_valid; // @[MixedNode.scala:551:17] wire [6:0] nodeIn_r_bits_id; // @[MixedNode.scala:551:17] wire [63:0] nodeIn_r_bits_data; // @[MixedNode.scala:551:17] wire [1:0] nodeIn_r_bits_resp; // @[MixedNode.scala:551:17] wire [3:0] nodeIn_r_bits_echo_tl_state_size; // @[MixedNode.scala:551:17] wire [7:0] nodeIn_r_bits_echo_tl_state_source; // @[MixedNode.scala:551:17] wire nodeIn_r_bits_echo_extra_id; // @[MixedNode.scala:551:17] wire nodeIn_r_bits_last; // @[MixedNode.scala:551:17] wire nodeOut_aw_ready = auto_out_aw_ready_0; // @[UserYanker.scala:36:9] wire nodeOut_aw_valid; // @[MixedNode.scala:542:17] wire [6:0] nodeOut_aw_bits_id; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_aw_bits_addr; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_aw_bits_len; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_aw_bits_size; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_aw_bits_burst; // @[MixedNode.scala:542:17] wire nodeOut_aw_bits_lock; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_aw_bits_cache; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_aw_bits_prot; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_aw_bits_qos; // @[MixedNode.scala:542:17] wire nodeOut_w_ready = auto_out_w_ready_0; // @[UserYanker.scala:36:9] wire nodeOut_w_valid; // @[MixedNode.scala:542:17] wire [63:0] nodeOut_w_bits_data; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_w_bits_strb; // 
@[MixedNode.scala:542:17] wire nodeOut_w_bits_last; // @[MixedNode.scala:542:17] wire nodeOut_b_ready; // @[MixedNode.scala:542:17] wire nodeOut_b_valid = auto_out_b_valid_0; // @[UserYanker.scala:36:9] wire [6:0] nodeOut_b_bits_id = auto_out_b_bits_id_0; // @[UserYanker.scala:36:9] wire [1:0] nodeOut_b_bits_resp = auto_out_b_bits_resp_0; // @[UserYanker.scala:36:9] wire nodeOut_ar_ready = auto_out_ar_ready_0; // @[UserYanker.scala:36:9] wire nodeOut_ar_valid; // @[MixedNode.scala:542:17] wire [6:0] nodeOut_ar_bits_id; // @[MixedNode.scala:542:17] wire [31:0] nodeOut_ar_bits_addr; // @[MixedNode.scala:542:17] wire [7:0] nodeOut_ar_bits_len; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_ar_bits_size; // @[MixedNode.scala:542:17] wire [1:0] nodeOut_ar_bits_burst; // @[MixedNode.scala:542:17] wire nodeOut_ar_bits_lock; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_ar_bits_cache; // @[MixedNode.scala:542:17] wire [2:0] nodeOut_ar_bits_prot; // @[MixedNode.scala:542:17] wire [3:0] nodeOut_ar_bits_qos; // @[MixedNode.scala:542:17] wire nodeOut_r_ready; // @[MixedNode.scala:542:17] wire nodeOut_r_valid = auto_out_r_valid_0; // @[UserYanker.scala:36:9] wire [6:0] nodeOut_r_bits_id = auto_out_r_bits_id_0; // @[UserYanker.scala:36:9] wire [63:0] nodeOut_r_bits_data = auto_out_r_bits_data_0; // @[UserYanker.scala:36:9] wire [1:0] nodeOut_r_bits_resp = auto_out_r_bits_resp_0; // @[UserYanker.scala:36:9] wire nodeOut_r_bits_last = auto_out_r_bits_last_0; // @[UserYanker.scala:36:9] wire auto_in_aw_ready_0; // @[UserYanker.scala:36:9] wire auto_in_w_ready_0; // @[UserYanker.scala:36:9] wire [3:0] auto_in_b_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9] wire [7:0] auto_in_b_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9] wire auto_in_b_bits_echo_extra_id_0; // @[UserYanker.scala:36:9] wire [6:0] auto_in_b_bits_id_0; // @[UserYanker.scala:36:9] wire [1:0] auto_in_b_bits_resp_0; // @[UserYanker.scala:36:9] wire auto_in_b_valid_0; // @[UserYanker.scala:36:9] wire auto_in_ar_ready_0; // @[UserYanker.scala:36:9] wire [3:0] auto_in_r_bits_echo_tl_state_size_0; // @[UserYanker.scala:36:9] wire [7:0] auto_in_r_bits_echo_tl_state_source_0; // @[UserYanker.scala:36:9] wire auto_in_r_bits_echo_extra_id_0; // @[UserYanker.scala:36:9] wire [6:0] auto_in_r_bits_id_0; // @[UserYanker.scala:36:9] wire [63:0] auto_in_r_bits_data_0; // @[UserYanker.scala:36:9] wire [1:0] auto_in_r_bits_resp_0; // @[UserYanker.scala:36:9] wire auto_in_r_bits_last_0; // @[UserYanker.scala:36:9] wire auto_in_r_valid_0; // @[UserYanker.scala:36:9] wire [6:0] auto_out_aw_bits_id_0; // @[UserYanker.scala:36:9] wire [31:0] auto_out_aw_bits_addr_0; // @[UserYanker.scala:36:9] wire [7:0] auto_out_aw_bits_len_0; // @[UserYanker.scala:36:9] wire [2:0] auto_out_aw_bits_size_0; // @[UserYanker.scala:36:9] wire [1:0] auto_out_aw_bits_burst_0; // @[UserYanker.scala:36:9] wire auto_out_aw_bits_lock_0; // @[UserYanker.scala:36:9] wire [3:0] auto_out_aw_bits_cache_0; // @[UserYanker.scala:36:9] wire [2:0] auto_out_aw_bits_prot_0; // @[UserYanker.scala:36:9] wire [3:0] auto_out_aw_bits_qos_0; // @[UserYanker.scala:36:9] wire auto_out_aw_valid_0; // @[UserYanker.scala:36:9] wire [63:0] auto_out_w_bits_data_0; // @[UserYanker.scala:36:9] wire [7:0] auto_out_w_bits_strb_0; // @[UserYanker.scala:36:9] wire auto_out_w_bits_last_0; // @[UserYanker.scala:36:9] wire auto_out_w_valid_0; // @[UserYanker.scala:36:9] wire auto_out_b_ready_0; // @[UserYanker.scala:36:9] wire [6:0] auto_out_ar_bits_id_0; // @[UserYanker.scala:36:9] wire [31:0] 
auto_out_ar_bits_addr_0; // @[UserYanker.scala:36:9] wire [7:0] auto_out_ar_bits_len_0; // @[UserYanker.scala:36:9] wire [2:0] auto_out_ar_bits_size_0; // @[UserYanker.scala:36:9] wire [1:0] auto_out_ar_bits_burst_0; // @[UserYanker.scala:36:9] wire auto_out_ar_bits_lock_0; // @[UserYanker.scala:36:9] wire [3:0] auto_out_ar_bits_cache_0; // @[UserYanker.scala:36:9] wire [2:0] auto_out_ar_bits_prot_0; // @[UserYanker.scala:36:9] wire [3:0] auto_out_ar_bits_qos_0; // @[UserYanker.scala:36:9] wire auto_out_ar_valid_0; // @[UserYanker.scala:36:9] wire auto_out_r_ready_0; // @[UserYanker.scala:36:9] wire _nodeIn_aw_ready_T; // @[UserYanker.scala:89:36] assign auto_in_aw_ready_0 = nodeIn_aw_ready; // @[UserYanker.scala:36:9] assign nodeOut_aw_bits_id = nodeIn_aw_bits_id; // @[MixedNode.scala:542:17, :551:17] wire [6:0] awsel_shiftAmount = nodeIn_aw_bits_id; // @[OneHot.scala:64:49] assign nodeOut_aw_bits_addr = nodeIn_aw_bits_addr; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_len = nodeIn_aw_bits_len; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_size = nodeIn_aw_bits_size; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_burst = nodeIn_aw_bits_burst; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_lock = nodeIn_aw_bits_lock; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_cache = nodeIn_aw_bits_cache; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_prot = nodeIn_aw_bits_prot; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_aw_bits_qos = nodeIn_aw_bits_qos; // @[MixedNode.scala:542:17, :551:17] assign auto_in_w_ready_0 = nodeIn_w_ready; // @[UserYanker.scala:36:9] assign nodeOut_w_valid = nodeIn_w_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_data = nodeIn_w_bits_data; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_strb = nodeIn_w_bits_strb; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_w_bits_last = nodeIn_w_bits_last; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_b_ready = nodeIn_b_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_in_b_valid_0 = nodeIn_b_valid; // @[UserYanker.scala:36:9] assign auto_in_b_bits_id_0 = nodeIn_b_bits_id; // @[UserYanker.scala:36:9] assign auto_in_b_bits_resp_0 = nodeIn_b_bits_resp; // @[UserYanker.scala:36:9] assign auto_in_b_bits_echo_tl_state_size_0 = nodeIn_b_bits_echo_tl_state_size; // @[UserYanker.scala:36:9] assign auto_in_b_bits_echo_tl_state_source_0 = nodeIn_b_bits_echo_tl_state_source; // @[UserYanker.scala:36:9] assign auto_in_b_bits_echo_extra_id_0 = nodeIn_b_bits_echo_extra_id; // @[UserYanker.scala:36:9] wire _nodeIn_ar_ready_T; // @[UserYanker.scala:60:36] assign auto_in_ar_ready_0 = nodeIn_ar_ready; // @[UserYanker.scala:36:9] assign nodeOut_ar_bits_id = nodeIn_ar_bits_id; // @[MixedNode.scala:542:17, :551:17] wire [6:0] arsel_shiftAmount = nodeIn_ar_bits_id; // @[OneHot.scala:64:49] assign nodeOut_ar_bits_addr = nodeIn_ar_bits_addr; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_len = nodeIn_ar_bits_len; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_size = nodeIn_ar_bits_size; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_burst = nodeIn_ar_bits_burst; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_lock = nodeIn_ar_bits_lock; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_cache = nodeIn_ar_bits_cache; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_ar_bits_prot = nodeIn_ar_bits_prot; // @[MixedNode.scala:542:17, :551:17] assign 
nodeOut_ar_bits_qos = nodeIn_ar_bits_qos; // @[MixedNode.scala:542:17, :551:17] assign nodeOut_r_ready = nodeIn_r_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_in_r_valid_0 = nodeIn_r_valid; // @[UserYanker.scala:36:9] assign auto_in_r_bits_id_0 = nodeIn_r_bits_id; // @[UserYanker.scala:36:9] assign auto_in_r_bits_data_0 = nodeIn_r_bits_data; // @[UserYanker.scala:36:9] assign auto_in_r_bits_resp_0 = nodeIn_r_bits_resp; // @[UserYanker.scala:36:9] assign auto_in_r_bits_echo_tl_state_size_0 = nodeIn_r_bits_echo_tl_state_size; // @[UserYanker.scala:36:9] assign auto_in_r_bits_echo_tl_state_source_0 = nodeIn_r_bits_echo_tl_state_source; // @[UserYanker.scala:36:9] assign auto_in_r_bits_echo_extra_id_0 = nodeIn_r_bits_echo_extra_id; // @[UserYanker.scala:36:9] assign auto_in_r_bits_last_0 = nodeIn_r_bits_last; // @[UserYanker.scala:36:9] wire _nodeOut_aw_valid_T; // @[UserYanker.scala:90:36] assign auto_out_aw_valid_0 = nodeOut_aw_valid; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_id_0 = nodeOut_aw_bits_id; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_addr_0 = nodeOut_aw_bits_addr; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_len_0 = nodeOut_aw_bits_len; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_size_0 = nodeOut_aw_bits_size; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_burst_0 = nodeOut_aw_bits_burst; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_lock_0 = nodeOut_aw_bits_lock; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_cache_0 = nodeOut_aw_bits_cache; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_prot_0 = nodeOut_aw_bits_prot; // @[UserYanker.scala:36:9] assign auto_out_aw_bits_qos_0 = nodeOut_aw_bits_qos; // @[UserYanker.scala:36:9] assign nodeIn_w_ready = nodeOut_w_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_out_w_valid_0 = nodeOut_w_valid; // @[UserYanker.scala:36:9] assign auto_out_w_bits_data_0 = nodeOut_w_bits_data; // @[UserYanker.scala:36:9] assign auto_out_w_bits_strb_0 = nodeOut_w_bits_strb; // @[UserYanker.scala:36:9] assign auto_out_w_bits_last_0 = nodeOut_w_bits_last; // @[UserYanker.scala:36:9] assign auto_out_b_ready_0 = nodeOut_b_ready; // @[UserYanker.scala:36:9] assign nodeIn_b_valid = nodeOut_b_valid; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_b_bits_id = nodeOut_b_bits_id; // @[MixedNode.scala:542:17, :551:17] wire [6:0] bsel_shiftAmount = nodeOut_b_bits_id; // @[OneHot.scala:64:49] assign nodeIn_b_bits_resp = nodeOut_b_bits_resp; // @[MixedNode.scala:542:17, :551:17] wire _nodeOut_ar_valid_T; // @[UserYanker.scala:61:36] assign auto_out_ar_valid_0 = nodeOut_ar_valid; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_id_0 = nodeOut_ar_bits_id; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_addr_0 = nodeOut_ar_bits_addr; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_len_0 = nodeOut_ar_bits_len; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_size_0 = nodeOut_ar_bits_size; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_burst_0 = nodeOut_ar_bits_burst; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_lock_0 = nodeOut_ar_bits_lock; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_cache_0 = nodeOut_ar_bits_cache; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_prot_0 = nodeOut_ar_bits_prot; // @[UserYanker.scala:36:9] assign auto_out_ar_bits_qos_0 = nodeOut_ar_bits_qos; // @[UserYanker.scala:36:9] assign auto_out_r_ready_0 = nodeOut_r_ready; // @[UserYanker.scala:36:9] assign nodeIn_r_valid = nodeOut_r_valid; // @[MixedNode.scala:542:17, :551:17] assign 
nodeIn_r_bits_id = nodeOut_r_bits_id; // @[MixedNode.scala:542:17, :551:17] wire [6:0] rsel_shiftAmount = nodeOut_r_bits_id; // @[OneHot.scala:64:49] assign nodeIn_r_bits_data = nodeOut_r_bits_data; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_r_bits_resp = nodeOut_r_bits_resp; // @[MixedNode.scala:542:17, :551:17] assign nodeIn_r_bits_last = nodeOut_r_bits_last; // @[MixedNode.scala:542:17, :551:17] wire _ar_ready_WIRE_0; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_1; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_2; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_3; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_4; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_5; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_6; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_7; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_8; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_9; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_10; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_11; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_12; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_13; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_14; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_15; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_16; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_17; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_18; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_19; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_20; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_21; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_22; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_23; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_24; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_25; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_26; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_27; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_28; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_29; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_30; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_31; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_32; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_33; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_34; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_35; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_36; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_37; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_38; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_39; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_40; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_41; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_42; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_43; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_44; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_45; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_46; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_47; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_48; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_49; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_50; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_51; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_52; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_53; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_54; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_55; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_56; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_57; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_58; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_59; // 
@[UserYanker.scala:59:29] wire _ar_ready_WIRE_60; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_61; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_62; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_63; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_64; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_65; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_66; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_67; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_68; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_69; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_70; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_71; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_72; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_73; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_74; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_75; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_76; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_77; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_78; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_79; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_80; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_81; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_82; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_83; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_84; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_85; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_86; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_87; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_88; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_89; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_90; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_91; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_92; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_93; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_94; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_95; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_96; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_97; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_98; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_99; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_100; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_101; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_102; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_103; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_104; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_105; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_106; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_107; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_108; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_109; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_110; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_111; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_112; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_113; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_114; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_115; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_116; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_117; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_118; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_119; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_120; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_121; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_122; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_123; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_124; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_125; // @[UserYanker.scala:59:29] wire 
_ar_ready_WIRE_126; // @[UserYanker.scala:59:29] wire _ar_ready_WIRE_127; // @[UserYanker.scala:59:29] wire [127:0] _GEN = {{_ar_ready_WIRE_127}, {_ar_ready_WIRE_126}, {_ar_ready_WIRE_125}, {_ar_ready_WIRE_124}, {_ar_ready_WIRE_123}, {_ar_ready_WIRE_122}, {_ar_ready_WIRE_121}, {_ar_ready_WIRE_120}, {_ar_ready_WIRE_119}, {_ar_ready_WIRE_118}, {_ar_ready_WIRE_117}, {_ar_ready_WIRE_116}, {_ar_ready_WIRE_115}, {_ar_ready_WIRE_114}, {_ar_ready_WIRE_113}, {_ar_ready_WIRE_112}, {_ar_ready_WIRE_111}, {_ar_ready_WIRE_110}, {_ar_ready_WIRE_109}, {_ar_ready_WIRE_108}, {_ar_ready_WIRE_107}, {_ar_ready_WIRE_106}, {_ar_ready_WIRE_105}, {_ar_ready_WIRE_104}, {_ar_ready_WIRE_103}, {_ar_ready_WIRE_102}, {_ar_ready_WIRE_101}, {_ar_ready_WIRE_100}, {_ar_ready_WIRE_99}, {_ar_ready_WIRE_98}, {_ar_ready_WIRE_97}, {_ar_ready_WIRE_96}, {_ar_ready_WIRE_95}, {_ar_ready_WIRE_94}, {_ar_ready_WIRE_93}, {_ar_ready_WIRE_92}, {_ar_ready_WIRE_91}, {_ar_ready_WIRE_90}, {_ar_ready_WIRE_89}, {_ar_ready_WIRE_88}, {_ar_ready_WIRE_87}, {_ar_ready_WIRE_86}, {_ar_ready_WIRE_85}, {_ar_ready_WIRE_84}, {_ar_ready_WIRE_83}, {_ar_ready_WIRE_82}, {_ar_ready_WIRE_81}, {_ar_ready_WIRE_80}, {_ar_ready_WIRE_79}, {_ar_ready_WIRE_78}, {_ar_ready_WIRE_77}, {_ar_ready_WIRE_76}, {_ar_ready_WIRE_75}, {_ar_ready_WIRE_74}, {_ar_ready_WIRE_73}, {_ar_ready_WIRE_72}, {_ar_ready_WIRE_71}, {_ar_ready_WIRE_70}, {_ar_ready_WIRE_69}, {_ar_ready_WIRE_68}, {_ar_ready_WIRE_67}, {_ar_ready_WIRE_66}, {_ar_ready_WIRE_65}, {_ar_ready_WIRE_64}, {_ar_ready_WIRE_63}, {_ar_ready_WIRE_62}, {_ar_ready_WIRE_61}, {_ar_ready_WIRE_60}, {_ar_ready_WIRE_59}, {_ar_ready_WIRE_58}, {_ar_ready_WIRE_57}, {_ar_ready_WIRE_56}, {_ar_ready_WIRE_55}, {_ar_ready_WIRE_54}, {_ar_ready_WIRE_53}, {_ar_ready_WIRE_52}, {_ar_ready_WIRE_51}, {_ar_ready_WIRE_50}, {_ar_ready_WIRE_49}, {_ar_ready_WIRE_48}, {_ar_ready_WIRE_47}, {_ar_ready_WIRE_46}, {_ar_ready_WIRE_45}, {_ar_ready_WIRE_44}, {_ar_ready_WIRE_43}, {_ar_ready_WIRE_42}, {_ar_ready_WIRE_41}, {_ar_ready_WIRE_40}, {_ar_ready_WIRE_39}, {_ar_ready_WIRE_38}, {_ar_ready_WIRE_37}, {_ar_ready_WIRE_36}, {_ar_ready_WIRE_35}, {_ar_ready_WIRE_34}, {_ar_ready_WIRE_33}, {_ar_ready_WIRE_32}, {_ar_ready_WIRE_31}, {_ar_ready_WIRE_30}, {_ar_ready_WIRE_29}, {_ar_ready_WIRE_28}, {_ar_ready_WIRE_27}, {_ar_ready_WIRE_26}, {_ar_ready_WIRE_25}, {_ar_ready_WIRE_24}, {_ar_ready_WIRE_23}, {_ar_ready_WIRE_22}, {_ar_ready_WIRE_21}, {_ar_ready_WIRE_20}, {_ar_ready_WIRE_19}, {_ar_ready_WIRE_18}, {_ar_ready_WIRE_17}, {_ar_ready_WIRE_16}, {_ar_ready_WIRE_15}, {_ar_ready_WIRE_14}, {_ar_ready_WIRE_13}, {_ar_ready_WIRE_12}, {_ar_ready_WIRE_11}, {_ar_ready_WIRE_10}, {_ar_ready_WIRE_9}, {_ar_ready_WIRE_8}, {_ar_ready_WIRE_7}, {_ar_ready_WIRE_6}, {_ar_ready_WIRE_5}, {_ar_ready_WIRE_4}, {_ar_ready_WIRE_3}, {_ar_ready_WIRE_2}, {_ar_ready_WIRE_1}, {_ar_ready_WIRE_0}}; // @[UserYanker.scala:59:29, :60:36] assign _nodeIn_ar_ready_T = nodeOut_ar_ready & _GEN[nodeIn_ar_bits_id]; // @[UserYanker.scala:60:36] assign nodeIn_ar_ready = _nodeIn_ar_ready_T; // @[UserYanker.scala:60:36] assign _nodeOut_ar_valid_T = nodeIn_ar_valid & _GEN[nodeIn_ar_bits_id]; // @[UserYanker.scala:60:36, :61:36] assign nodeOut_ar_valid = _nodeOut_ar_valid_T; // @[UserYanker.scala:61:36] wire [3:0] _r_bits_WIRE_0_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_1_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_2_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_3_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_4_tl_state_size; // 
@[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_5_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_6_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_7_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_8_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_9_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_10_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_11_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_12_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_13_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_14_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_15_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_16_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_17_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_18_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_19_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_20_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_21_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_22_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_23_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_24_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_25_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_26_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_27_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_28_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_29_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_30_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_31_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_32_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_33_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_34_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_35_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_36_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_37_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_38_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_39_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_40_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_41_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_42_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_43_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_44_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_45_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_46_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_47_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_48_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_49_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_50_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_51_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_52_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_53_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_54_tl_state_size; // 
@[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_55_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_56_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_57_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_58_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_59_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_60_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_61_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_62_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_63_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_64_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_65_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_66_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_67_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_68_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_69_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_70_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_71_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_72_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_73_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_74_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_75_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_76_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_77_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_78_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_79_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_80_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_81_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_82_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_83_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_84_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_85_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_86_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_87_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_88_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_89_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_90_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_91_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_92_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_93_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_94_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_95_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_96_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_97_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_98_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_99_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_100_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_101_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_102_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_103_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_104_tl_state_size; // 
@[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_105_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_106_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_107_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_108_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_109_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_110_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_111_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_112_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_113_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_114_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_115_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_116_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_117_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_118_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_119_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_120_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_121_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_122_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_123_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_124_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_125_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_126_tl_state_size; // @[UserYanker.scala:68:27] wire [3:0] _r_bits_WIRE_127_tl_state_size; // @[UserYanker.scala:68:27] wire [127:0][3:0] _GEN_0 = {{_r_bits_WIRE_127_tl_state_size}, {_r_bits_WIRE_126_tl_state_size}, {_r_bits_WIRE_125_tl_state_size}, {_r_bits_WIRE_124_tl_state_size}, {_r_bits_WIRE_123_tl_state_size}, {_r_bits_WIRE_122_tl_state_size}, {_r_bits_WIRE_121_tl_state_size}, {_r_bits_WIRE_120_tl_state_size}, {_r_bits_WIRE_119_tl_state_size}, {_r_bits_WIRE_118_tl_state_size}, {_r_bits_WIRE_117_tl_state_size}, {_r_bits_WIRE_116_tl_state_size}, {_r_bits_WIRE_115_tl_state_size}, {_r_bits_WIRE_114_tl_state_size}, {_r_bits_WIRE_113_tl_state_size}, {_r_bits_WIRE_112_tl_state_size}, {_r_bits_WIRE_111_tl_state_size}, {_r_bits_WIRE_110_tl_state_size}, {_r_bits_WIRE_109_tl_state_size}, {_r_bits_WIRE_108_tl_state_size}, {_r_bits_WIRE_107_tl_state_size}, {_r_bits_WIRE_106_tl_state_size}, {_r_bits_WIRE_105_tl_state_size}, {_r_bits_WIRE_104_tl_state_size}, {_r_bits_WIRE_103_tl_state_size}, {_r_bits_WIRE_102_tl_state_size}, {_r_bits_WIRE_101_tl_state_size}, {_r_bits_WIRE_100_tl_state_size}, {_r_bits_WIRE_99_tl_state_size}, {_r_bits_WIRE_98_tl_state_size}, {_r_bits_WIRE_97_tl_state_size}, {_r_bits_WIRE_96_tl_state_size}, {_r_bits_WIRE_95_tl_state_size}, {_r_bits_WIRE_94_tl_state_size}, {_r_bits_WIRE_93_tl_state_size}, {_r_bits_WIRE_92_tl_state_size}, {_r_bits_WIRE_91_tl_state_size}, {_r_bits_WIRE_90_tl_state_size}, {_r_bits_WIRE_89_tl_state_size}, {_r_bits_WIRE_88_tl_state_size}, {_r_bits_WIRE_87_tl_state_size}, {_r_bits_WIRE_86_tl_state_size}, {_r_bits_WIRE_85_tl_state_size}, {_r_bits_WIRE_84_tl_state_size}, {_r_bits_WIRE_83_tl_state_size}, {_r_bits_WIRE_82_tl_state_size}, {_r_bits_WIRE_81_tl_state_size}, {_r_bits_WIRE_80_tl_state_size}, {_r_bits_WIRE_79_tl_state_size}, {_r_bits_WIRE_78_tl_state_size}, {_r_bits_WIRE_77_tl_state_size}, {_r_bits_WIRE_76_tl_state_size}, {_r_bits_WIRE_75_tl_state_size}, {_r_bits_WIRE_74_tl_state_size}, {_r_bits_WIRE_73_tl_state_size}, 
{_r_bits_WIRE_72_tl_state_size}, {_r_bits_WIRE_71_tl_state_size}, {_r_bits_WIRE_70_tl_state_size}, {_r_bits_WIRE_69_tl_state_size}, {_r_bits_WIRE_68_tl_state_size}, {_r_bits_WIRE_67_tl_state_size}, {_r_bits_WIRE_66_tl_state_size}, {_r_bits_WIRE_65_tl_state_size}, {_r_bits_WIRE_64_tl_state_size}, {_r_bits_WIRE_63_tl_state_size}, {_r_bits_WIRE_62_tl_state_size}, {_r_bits_WIRE_61_tl_state_size}, {_r_bits_WIRE_60_tl_state_size}, {_r_bits_WIRE_59_tl_state_size}, {_r_bits_WIRE_58_tl_state_size}, {_r_bits_WIRE_57_tl_state_size}, {_r_bits_WIRE_56_tl_state_size}, {_r_bits_WIRE_55_tl_state_size}, {_r_bits_WIRE_54_tl_state_size}, {_r_bits_WIRE_53_tl_state_size}, {_r_bits_WIRE_52_tl_state_size}, {_r_bits_WIRE_51_tl_state_size}, {_r_bits_WIRE_50_tl_state_size}, {_r_bits_WIRE_49_tl_state_size}, {_r_bits_WIRE_48_tl_state_size}, {_r_bits_WIRE_47_tl_state_size}, {_r_bits_WIRE_46_tl_state_size}, {_r_bits_WIRE_45_tl_state_size}, {_r_bits_WIRE_44_tl_state_size}, {_r_bits_WIRE_43_tl_state_size}, {_r_bits_WIRE_42_tl_state_size}, {_r_bits_WIRE_41_tl_state_size}, {_r_bits_WIRE_40_tl_state_size}, {_r_bits_WIRE_39_tl_state_size}, {_r_bits_WIRE_38_tl_state_size}, {_r_bits_WIRE_37_tl_state_size}, {_r_bits_WIRE_36_tl_state_size}, {_r_bits_WIRE_35_tl_state_size}, {_r_bits_WIRE_34_tl_state_size}, {_r_bits_WIRE_33_tl_state_size}, {_r_bits_WIRE_32_tl_state_size}, {_r_bits_WIRE_31_tl_state_size}, {_r_bits_WIRE_30_tl_state_size}, {_r_bits_WIRE_29_tl_state_size}, {_r_bits_WIRE_28_tl_state_size}, {_r_bits_WIRE_27_tl_state_size}, {_r_bits_WIRE_26_tl_state_size}, {_r_bits_WIRE_25_tl_state_size}, {_r_bits_WIRE_24_tl_state_size}, {_r_bits_WIRE_23_tl_state_size}, {_r_bits_WIRE_22_tl_state_size}, {_r_bits_WIRE_21_tl_state_size}, {_r_bits_WIRE_20_tl_state_size}, {_r_bits_WIRE_19_tl_state_size}, {_r_bits_WIRE_18_tl_state_size}, {_r_bits_WIRE_17_tl_state_size}, {_r_bits_WIRE_16_tl_state_size}, {_r_bits_WIRE_15_tl_state_size}, {_r_bits_WIRE_14_tl_state_size}, {_r_bits_WIRE_13_tl_state_size}, {_r_bits_WIRE_12_tl_state_size}, {_r_bits_WIRE_11_tl_state_size}, {_r_bits_WIRE_10_tl_state_size}, {_r_bits_WIRE_9_tl_state_size}, {_r_bits_WIRE_8_tl_state_size}, {_r_bits_WIRE_7_tl_state_size}, {_r_bits_WIRE_6_tl_state_size}, {_r_bits_WIRE_5_tl_state_size}, {_r_bits_WIRE_4_tl_state_size}, {_r_bits_WIRE_3_tl_state_size}, {_r_bits_WIRE_2_tl_state_size}, {_r_bits_WIRE_1_tl_state_size}, {_r_bits_WIRE_0_tl_state_size}}; // @[UserYanker.scala:68:27, :73:22] assign nodeIn_r_bits_echo_tl_state_size = _GEN_0[nodeOut_r_bits_id]; // @[UserYanker.scala:73:22] wire [7:0] _r_bits_WIRE_0_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_1_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_2_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_3_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_4_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_5_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_6_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_7_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_8_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_9_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_10_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_11_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_12_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_13_tl_state_source; // @[UserYanker.scala:68:27] wire 
[7:0] _r_bits_WIRE_14_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_15_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_16_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_17_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_18_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_19_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_20_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_21_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_22_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_23_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_24_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_25_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_26_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_27_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_28_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_29_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_30_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_31_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_32_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_33_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_34_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_35_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_36_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_37_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_38_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_39_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_40_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_41_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_42_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_43_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_44_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_45_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_46_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_47_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_48_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_49_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_50_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_51_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_52_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_53_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_54_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_55_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_56_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_57_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_58_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_59_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_60_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_61_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_62_tl_state_source; // 
@[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_63_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_64_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_65_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_66_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_67_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_68_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_69_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_70_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_71_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_72_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_73_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_74_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_75_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_76_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_77_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_78_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_79_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_80_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_81_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_82_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_83_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_84_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_85_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_86_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_87_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_88_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_89_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_90_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_91_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_92_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_93_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_94_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_95_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_96_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_97_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_98_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_99_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_100_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_101_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_102_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_103_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_104_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_105_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_106_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_107_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_108_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_109_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_110_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] 
_r_bits_WIRE_111_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_112_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_113_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_114_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_115_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_116_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_117_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_118_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_119_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_120_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_121_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_122_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_123_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_124_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_125_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_126_tl_state_source; // @[UserYanker.scala:68:27] wire [7:0] _r_bits_WIRE_127_tl_state_source; // @[UserYanker.scala:68:27] wire [127:0][7:0] _GEN_1 = {{_r_bits_WIRE_127_tl_state_source}, {_r_bits_WIRE_126_tl_state_source}, {_r_bits_WIRE_125_tl_state_source}, {_r_bits_WIRE_124_tl_state_source}, {_r_bits_WIRE_123_tl_state_source}, {_r_bits_WIRE_122_tl_state_source}, {_r_bits_WIRE_121_tl_state_source}, {_r_bits_WIRE_120_tl_state_source}, {_r_bits_WIRE_119_tl_state_source}, {_r_bits_WIRE_118_tl_state_source}, {_r_bits_WIRE_117_tl_state_source}, {_r_bits_WIRE_116_tl_state_source}, {_r_bits_WIRE_115_tl_state_source}, {_r_bits_WIRE_114_tl_state_source}, {_r_bits_WIRE_113_tl_state_source}, {_r_bits_WIRE_112_tl_state_source}, {_r_bits_WIRE_111_tl_state_source}, {_r_bits_WIRE_110_tl_state_source}, {_r_bits_WIRE_109_tl_state_source}, {_r_bits_WIRE_108_tl_state_source}, {_r_bits_WIRE_107_tl_state_source}, {_r_bits_WIRE_106_tl_state_source}, {_r_bits_WIRE_105_tl_state_source}, {_r_bits_WIRE_104_tl_state_source}, {_r_bits_WIRE_103_tl_state_source}, {_r_bits_WIRE_102_tl_state_source}, {_r_bits_WIRE_101_tl_state_source}, {_r_bits_WIRE_100_tl_state_source}, {_r_bits_WIRE_99_tl_state_source}, {_r_bits_WIRE_98_tl_state_source}, {_r_bits_WIRE_97_tl_state_source}, {_r_bits_WIRE_96_tl_state_source}, {_r_bits_WIRE_95_tl_state_source}, {_r_bits_WIRE_94_tl_state_source}, {_r_bits_WIRE_93_tl_state_source}, {_r_bits_WIRE_92_tl_state_source}, {_r_bits_WIRE_91_tl_state_source}, {_r_bits_WIRE_90_tl_state_source}, {_r_bits_WIRE_89_tl_state_source}, {_r_bits_WIRE_88_tl_state_source}, {_r_bits_WIRE_87_tl_state_source}, {_r_bits_WIRE_86_tl_state_source}, {_r_bits_WIRE_85_tl_state_source}, {_r_bits_WIRE_84_tl_state_source}, {_r_bits_WIRE_83_tl_state_source}, {_r_bits_WIRE_82_tl_state_source}, {_r_bits_WIRE_81_tl_state_source}, {_r_bits_WIRE_80_tl_state_source}, {_r_bits_WIRE_79_tl_state_source}, {_r_bits_WIRE_78_tl_state_source}, {_r_bits_WIRE_77_tl_state_source}, {_r_bits_WIRE_76_tl_state_source}, {_r_bits_WIRE_75_tl_state_source}, {_r_bits_WIRE_74_tl_state_source}, {_r_bits_WIRE_73_tl_state_source}, {_r_bits_WIRE_72_tl_state_source}, {_r_bits_WIRE_71_tl_state_source}, {_r_bits_WIRE_70_tl_state_source}, {_r_bits_WIRE_69_tl_state_source}, {_r_bits_WIRE_68_tl_state_source}, {_r_bits_WIRE_67_tl_state_source}, {_r_bits_WIRE_66_tl_state_source}, {_r_bits_WIRE_65_tl_state_source}, {_r_bits_WIRE_64_tl_state_source}, 
{_r_bits_WIRE_63_tl_state_source}, {_r_bits_WIRE_62_tl_state_source}, {_r_bits_WIRE_61_tl_state_source}, {_r_bits_WIRE_60_tl_state_source}, {_r_bits_WIRE_59_tl_state_source}, {_r_bits_WIRE_58_tl_state_source}, {_r_bits_WIRE_57_tl_state_source}, {_r_bits_WIRE_56_tl_state_source}, {_r_bits_WIRE_55_tl_state_source}, {_r_bits_WIRE_54_tl_state_source}, {_r_bits_WIRE_53_tl_state_source}, {_r_bits_WIRE_52_tl_state_source}, {_r_bits_WIRE_51_tl_state_source}, {_r_bits_WIRE_50_tl_state_source}, {_r_bits_WIRE_49_tl_state_source}, {_r_bits_WIRE_48_tl_state_source}, {_r_bits_WIRE_47_tl_state_source}, {_r_bits_WIRE_46_tl_state_source}, {_r_bits_WIRE_45_tl_state_source}, {_r_bits_WIRE_44_tl_state_source}, {_r_bits_WIRE_43_tl_state_source}, {_r_bits_WIRE_42_tl_state_source}, {_r_bits_WIRE_41_tl_state_source}, {_r_bits_WIRE_40_tl_state_source}, {_r_bits_WIRE_39_tl_state_source}, {_r_bits_WIRE_38_tl_state_source}, {_r_bits_WIRE_37_tl_state_source}, {_r_bits_WIRE_36_tl_state_source}, {_r_bits_WIRE_35_tl_state_source}, {_r_bits_WIRE_34_tl_state_source}, {_r_bits_WIRE_33_tl_state_source}, {_r_bits_WIRE_32_tl_state_source}, {_r_bits_WIRE_31_tl_state_source}, {_r_bits_WIRE_30_tl_state_source}, {_r_bits_WIRE_29_tl_state_source}, {_r_bits_WIRE_28_tl_state_source}, {_r_bits_WIRE_27_tl_state_source}, {_r_bits_WIRE_26_tl_state_source}, {_r_bits_WIRE_25_tl_state_source}, {_r_bits_WIRE_24_tl_state_source}, {_r_bits_WIRE_23_tl_state_source}, {_r_bits_WIRE_22_tl_state_source}, {_r_bits_WIRE_21_tl_state_source}, {_r_bits_WIRE_20_tl_state_source}, {_r_bits_WIRE_19_tl_state_source}, {_r_bits_WIRE_18_tl_state_source}, {_r_bits_WIRE_17_tl_state_source}, {_r_bits_WIRE_16_tl_state_source}, {_r_bits_WIRE_15_tl_state_source}, {_r_bits_WIRE_14_tl_state_source}, {_r_bits_WIRE_13_tl_state_source}, {_r_bits_WIRE_12_tl_state_source}, {_r_bits_WIRE_11_tl_state_source}, {_r_bits_WIRE_10_tl_state_source}, {_r_bits_WIRE_9_tl_state_source}, {_r_bits_WIRE_8_tl_state_source}, {_r_bits_WIRE_7_tl_state_source}, {_r_bits_WIRE_6_tl_state_source}, {_r_bits_WIRE_5_tl_state_source}, {_r_bits_WIRE_4_tl_state_source}, {_r_bits_WIRE_3_tl_state_source}, {_r_bits_WIRE_2_tl_state_source}, {_r_bits_WIRE_1_tl_state_source}, {_r_bits_WIRE_0_tl_state_source}}; // @[UserYanker.scala:68:27, :73:22] assign nodeIn_r_bits_echo_tl_state_source = _GEN_1[nodeOut_r_bits_id]; // @[UserYanker.scala:73:22] wire _r_bits_WIRE_0_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_1_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_2_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_3_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_4_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_5_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_6_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_7_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_8_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_9_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_10_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_11_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_12_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_13_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_14_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_15_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_16_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_17_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_18_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_19_extra_id; // 
@[UserYanker.scala:68:27] wire _r_bits_WIRE_20_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_21_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_22_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_23_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_24_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_25_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_26_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_27_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_28_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_29_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_30_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_31_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_32_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_33_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_34_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_35_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_36_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_37_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_38_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_39_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_40_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_41_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_42_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_43_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_44_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_45_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_46_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_47_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_48_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_49_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_50_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_51_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_52_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_53_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_54_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_55_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_56_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_57_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_58_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_59_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_60_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_61_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_62_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_63_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_64_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_65_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_66_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_67_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_68_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_69_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_70_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_71_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_72_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_73_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_74_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_75_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_76_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_77_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_78_extra_id; // 
@[UserYanker.scala:68:27] wire _r_bits_WIRE_79_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_80_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_81_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_82_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_83_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_84_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_85_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_86_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_87_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_88_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_89_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_90_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_91_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_92_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_93_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_94_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_95_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_96_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_97_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_98_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_99_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_100_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_101_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_102_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_103_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_104_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_105_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_106_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_107_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_108_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_109_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_110_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_111_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_112_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_113_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_114_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_115_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_116_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_117_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_118_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_119_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_120_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_121_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_122_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_123_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_124_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_125_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_126_extra_id; // @[UserYanker.scala:68:27] wire _r_bits_WIRE_127_extra_id; // @[UserYanker.scala:68:27] wire [127:0] _GEN_2 = {{_r_bits_WIRE_127_extra_id}, {_r_bits_WIRE_126_extra_id}, {_r_bits_WIRE_125_extra_id}, {_r_bits_WIRE_124_extra_id}, {_r_bits_WIRE_123_extra_id}, {_r_bits_WIRE_122_extra_id}, {_r_bits_WIRE_121_extra_id}, {_r_bits_WIRE_120_extra_id}, {_r_bits_WIRE_119_extra_id}, {_r_bits_WIRE_118_extra_id}, {_r_bits_WIRE_117_extra_id}, {_r_bits_WIRE_116_extra_id}, {_r_bits_WIRE_115_extra_id}, {_r_bits_WIRE_114_extra_id}, {_r_bits_WIRE_113_extra_id}, {_r_bits_WIRE_112_extra_id}, {_r_bits_WIRE_111_extra_id}, {_r_bits_WIRE_110_extra_id}, 
{_r_bits_WIRE_109_extra_id}, {_r_bits_WIRE_108_extra_id}, {_r_bits_WIRE_107_extra_id}, {_r_bits_WIRE_106_extra_id}, {_r_bits_WIRE_105_extra_id}, {_r_bits_WIRE_104_extra_id}, {_r_bits_WIRE_103_extra_id}, {_r_bits_WIRE_102_extra_id}, {_r_bits_WIRE_101_extra_id}, {_r_bits_WIRE_100_extra_id}, {_r_bits_WIRE_99_extra_id}, {_r_bits_WIRE_98_extra_id}, {_r_bits_WIRE_97_extra_id}, {_r_bits_WIRE_96_extra_id}, {_r_bits_WIRE_95_extra_id}, {_r_bits_WIRE_94_extra_id}, {_r_bits_WIRE_93_extra_id}, {_r_bits_WIRE_92_extra_id}, {_r_bits_WIRE_91_extra_id}, {_r_bits_WIRE_90_extra_id}, {_r_bits_WIRE_89_extra_id}, {_r_bits_WIRE_88_extra_id}, {_r_bits_WIRE_87_extra_id}, {_r_bits_WIRE_86_extra_id}, {_r_bits_WIRE_85_extra_id}, {_r_bits_WIRE_84_extra_id}, {_r_bits_WIRE_83_extra_id}, {_r_bits_WIRE_82_extra_id}, {_r_bits_WIRE_81_extra_id}, {_r_bits_WIRE_80_extra_id}, {_r_bits_WIRE_79_extra_id}, {_r_bits_WIRE_78_extra_id}, {_r_bits_WIRE_77_extra_id}, {_r_bits_WIRE_76_extra_id}, {_r_bits_WIRE_75_extra_id}, {_r_bits_WIRE_74_extra_id}, {_r_bits_WIRE_73_extra_id}, {_r_bits_WIRE_72_extra_id}, {_r_bits_WIRE_71_extra_id}, {_r_bits_WIRE_70_extra_id}, {_r_bits_WIRE_69_extra_id}, {_r_bits_WIRE_68_extra_id}, {_r_bits_WIRE_67_extra_id}, {_r_bits_WIRE_66_extra_id}, {_r_bits_WIRE_65_extra_id}, {_r_bits_WIRE_64_extra_id}, {_r_bits_WIRE_63_extra_id}, {_r_bits_WIRE_62_extra_id}, {_r_bits_WIRE_61_extra_id}, {_r_bits_WIRE_60_extra_id}, {_r_bits_WIRE_59_extra_id}, {_r_bits_WIRE_58_extra_id}, {_r_bits_WIRE_57_extra_id}, {_r_bits_WIRE_56_extra_id}, {_r_bits_WIRE_55_extra_id}, {_r_bits_WIRE_54_extra_id}, {_r_bits_WIRE_53_extra_id}, {_r_bits_WIRE_52_extra_id}, {_r_bits_WIRE_51_extra_id}, {_r_bits_WIRE_50_extra_id}, {_r_bits_WIRE_49_extra_id}, {_r_bits_WIRE_48_extra_id}, {_r_bits_WIRE_47_extra_id}, {_r_bits_WIRE_46_extra_id}, {_r_bits_WIRE_45_extra_id}, {_r_bits_WIRE_44_extra_id}, {_r_bits_WIRE_43_extra_id}, {_r_bits_WIRE_42_extra_id}, {_r_bits_WIRE_41_extra_id}, {_r_bits_WIRE_40_extra_id}, {_r_bits_WIRE_39_extra_id}, {_r_bits_WIRE_38_extra_id}, {_r_bits_WIRE_37_extra_id}, {_r_bits_WIRE_36_extra_id}, {_r_bits_WIRE_35_extra_id}, {_r_bits_WIRE_34_extra_id}, {_r_bits_WIRE_33_extra_id}, {_r_bits_WIRE_32_extra_id}, {_r_bits_WIRE_31_extra_id}, {_r_bits_WIRE_30_extra_id}, {_r_bits_WIRE_29_extra_id}, {_r_bits_WIRE_28_extra_id}, {_r_bits_WIRE_27_extra_id}, {_r_bits_WIRE_26_extra_id}, {_r_bits_WIRE_25_extra_id}, {_r_bits_WIRE_24_extra_id}, {_r_bits_WIRE_23_extra_id}, {_r_bits_WIRE_22_extra_id}, {_r_bits_WIRE_21_extra_id}, {_r_bits_WIRE_20_extra_id}, {_r_bits_WIRE_19_extra_id}, {_r_bits_WIRE_18_extra_id}, {_r_bits_WIRE_17_extra_id}, {_r_bits_WIRE_16_extra_id}, {_r_bits_WIRE_15_extra_id}, {_r_bits_WIRE_14_extra_id}, {_r_bits_WIRE_13_extra_id}, {_r_bits_WIRE_12_extra_id}, {_r_bits_WIRE_11_extra_id}, {_r_bits_WIRE_10_extra_id}, {_r_bits_WIRE_9_extra_id}, {_r_bits_WIRE_8_extra_id}, {_r_bits_WIRE_7_extra_id}, {_r_bits_WIRE_6_extra_id}, {_r_bits_WIRE_5_extra_id}, {_r_bits_WIRE_4_extra_id}, {_r_bits_WIRE_3_extra_id}, {_r_bits_WIRE_2_extra_id}, {_r_bits_WIRE_1_extra_id}, {_r_bits_WIRE_0_extra_id}}; // @[UserYanker.scala:68:27, :73:22] assign nodeIn_r_bits_echo_extra_id = _GEN_2[nodeOut_r_bits_id]; // @[UserYanker.scala:73:22] wire [127:0] _arsel_T = 128'h1 << arsel_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [127:0] _arsel_T_1 = _arsel_T; // @[OneHot.scala:65:{12,27}] wire arsel_0 = _arsel_T_1[0]; // @[OneHot.scala:65:27] wire arsel_1 = _arsel_T_1[1]; // @[OneHot.scala:65:27] wire arsel_2 = _arsel_T_1[2]; // @[OneHot.scala:65:27] wire arsel_3 = 
_arsel_T_1[3]; // @[OneHot.scala:65:27] wire arsel_4 = _arsel_T_1[4]; // @[OneHot.scala:65:27] wire arsel_5 = _arsel_T_1[5]; // @[OneHot.scala:65:27] wire arsel_6 = _arsel_T_1[6]; // @[OneHot.scala:65:27] wire arsel_7 = _arsel_T_1[7]; // @[OneHot.scala:65:27] wire arsel_8 = _arsel_T_1[8]; // @[OneHot.scala:65:27] wire arsel_9 = _arsel_T_1[9]; // @[OneHot.scala:65:27] wire arsel_10 = _arsel_T_1[10]; // @[OneHot.scala:65:27] wire arsel_11 = _arsel_T_1[11]; // @[OneHot.scala:65:27] wire arsel_12 = _arsel_T_1[12]; // @[OneHot.scala:65:27] wire arsel_13 = _arsel_T_1[13]; // @[OneHot.scala:65:27] wire arsel_14 = _arsel_T_1[14]; // @[OneHot.scala:65:27] wire arsel_15 = _arsel_T_1[15]; // @[OneHot.scala:65:27] wire arsel_16 = _arsel_T_1[16]; // @[OneHot.scala:65:27] wire arsel_17 = _arsel_T_1[17]; // @[OneHot.scala:65:27] wire arsel_18 = _arsel_T_1[18]; // @[OneHot.scala:65:27] wire arsel_19 = _arsel_T_1[19]; // @[OneHot.scala:65:27] wire arsel_20 = _arsel_T_1[20]; // @[OneHot.scala:65:27] wire arsel_21 = _arsel_T_1[21]; // @[OneHot.scala:65:27] wire arsel_22 = _arsel_T_1[22]; // @[OneHot.scala:65:27] wire arsel_23 = _arsel_T_1[23]; // @[OneHot.scala:65:27] wire arsel_24 = _arsel_T_1[24]; // @[OneHot.scala:65:27] wire arsel_25 = _arsel_T_1[25]; // @[OneHot.scala:65:27] wire arsel_26 = _arsel_T_1[26]; // @[OneHot.scala:65:27] wire arsel_27 = _arsel_T_1[27]; // @[OneHot.scala:65:27] wire arsel_28 = _arsel_T_1[28]; // @[OneHot.scala:65:27] wire arsel_29 = _arsel_T_1[29]; // @[OneHot.scala:65:27] wire arsel_30 = _arsel_T_1[30]; // @[OneHot.scala:65:27] wire arsel_31 = _arsel_T_1[31]; // @[OneHot.scala:65:27] wire arsel_32 = _arsel_T_1[32]; // @[OneHot.scala:65:27] wire arsel_33 = _arsel_T_1[33]; // @[OneHot.scala:65:27] wire arsel_34 = _arsel_T_1[34]; // @[OneHot.scala:65:27] wire arsel_35 = _arsel_T_1[35]; // @[OneHot.scala:65:27] wire arsel_36 = _arsel_T_1[36]; // @[OneHot.scala:65:27] wire arsel_37 = _arsel_T_1[37]; // @[OneHot.scala:65:27] wire arsel_38 = _arsel_T_1[38]; // @[OneHot.scala:65:27] wire arsel_39 = _arsel_T_1[39]; // @[OneHot.scala:65:27] wire arsel_40 = _arsel_T_1[40]; // @[OneHot.scala:65:27] wire arsel_41 = _arsel_T_1[41]; // @[OneHot.scala:65:27] wire arsel_42 = _arsel_T_1[42]; // @[OneHot.scala:65:27] wire arsel_43 = _arsel_T_1[43]; // @[OneHot.scala:65:27] wire arsel_44 = _arsel_T_1[44]; // @[OneHot.scala:65:27] wire arsel_45 = _arsel_T_1[45]; // @[OneHot.scala:65:27] wire arsel_46 = _arsel_T_1[46]; // @[OneHot.scala:65:27] wire arsel_47 = _arsel_T_1[47]; // @[OneHot.scala:65:27] wire arsel_48 = _arsel_T_1[48]; // @[OneHot.scala:65:27] wire arsel_49 = _arsel_T_1[49]; // @[OneHot.scala:65:27] wire arsel_50 = _arsel_T_1[50]; // @[OneHot.scala:65:27] wire arsel_51 = _arsel_T_1[51]; // @[OneHot.scala:65:27] wire arsel_52 = _arsel_T_1[52]; // @[OneHot.scala:65:27] wire arsel_53 = _arsel_T_1[53]; // @[OneHot.scala:65:27] wire arsel_54 = _arsel_T_1[54]; // @[OneHot.scala:65:27] wire arsel_55 = _arsel_T_1[55]; // @[OneHot.scala:65:27] wire arsel_56 = _arsel_T_1[56]; // @[OneHot.scala:65:27] wire arsel_57 = _arsel_T_1[57]; // @[OneHot.scala:65:27] wire arsel_58 = _arsel_T_1[58]; // @[OneHot.scala:65:27] wire arsel_59 = _arsel_T_1[59]; // @[OneHot.scala:65:27] wire arsel_60 = _arsel_T_1[60]; // @[OneHot.scala:65:27] wire arsel_61 = _arsel_T_1[61]; // @[OneHot.scala:65:27] wire arsel_62 = _arsel_T_1[62]; // @[OneHot.scala:65:27] wire arsel_63 = _arsel_T_1[63]; // @[OneHot.scala:65:27] wire arsel_64 = _arsel_T_1[64]; // @[OneHot.scala:65:27] wire arsel_65 = _arsel_T_1[65]; // 
@[OneHot.scala:65:27] wire arsel_66 = _arsel_T_1[66]; // @[OneHot.scala:65:27] wire arsel_67 = _arsel_T_1[67]; // @[OneHot.scala:65:27] wire arsel_68 = _arsel_T_1[68]; // @[OneHot.scala:65:27] wire arsel_69 = _arsel_T_1[69]; // @[OneHot.scala:65:27] wire arsel_70 = _arsel_T_1[70]; // @[OneHot.scala:65:27] wire arsel_71 = _arsel_T_1[71]; // @[OneHot.scala:65:27] wire arsel_72 = _arsel_T_1[72]; // @[OneHot.scala:65:27] wire arsel_73 = _arsel_T_1[73]; // @[OneHot.scala:65:27] wire arsel_74 = _arsel_T_1[74]; // @[OneHot.scala:65:27] wire arsel_75 = _arsel_T_1[75]; // @[OneHot.scala:65:27] wire arsel_76 = _arsel_T_1[76]; // @[OneHot.scala:65:27] wire arsel_77 = _arsel_T_1[77]; // @[OneHot.scala:65:27] wire arsel_78 = _arsel_T_1[78]; // @[OneHot.scala:65:27] wire arsel_79 = _arsel_T_1[79]; // @[OneHot.scala:65:27] wire arsel_80 = _arsel_T_1[80]; // @[OneHot.scala:65:27] wire arsel_81 = _arsel_T_1[81]; // @[OneHot.scala:65:27] wire arsel_82 = _arsel_T_1[82]; // @[OneHot.scala:65:27] wire arsel_83 = _arsel_T_1[83]; // @[OneHot.scala:65:27] wire arsel_84 = _arsel_T_1[84]; // @[OneHot.scala:65:27] wire arsel_85 = _arsel_T_1[85]; // @[OneHot.scala:65:27] wire arsel_86 = _arsel_T_1[86]; // @[OneHot.scala:65:27] wire arsel_87 = _arsel_T_1[87]; // @[OneHot.scala:65:27] wire arsel_88 = _arsel_T_1[88]; // @[OneHot.scala:65:27] wire arsel_89 = _arsel_T_1[89]; // @[OneHot.scala:65:27] wire arsel_90 = _arsel_T_1[90]; // @[OneHot.scala:65:27] wire arsel_91 = _arsel_T_1[91]; // @[OneHot.scala:65:27] wire arsel_92 = _arsel_T_1[92]; // @[OneHot.scala:65:27] wire arsel_93 = _arsel_T_1[93]; // @[OneHot.scala:65:27] wire arsel_94 = _arsel_T_1[94]; // @[OneHot.scala:65:27] wire arsel_95 = _arsel_T_1[95]; // @[OneHot.scala:65:27] wire arsel_96 = _arsel_T_1[96]; // @[OneHot.scala:65:27] wire arsel_97 = _arsel_T_1[97]; // @[OneHot.scala:65:27] wire arsel_98 = _arsel_T_1[98]; // @[OneHot.scala:65:27] wire arsel_99 = _arsel_T_1[99]; // @[OneHot.scala:65:27] wire arsel_100 = _arsel_T_1[100]; // @[OneHot.scala:65:27] wire arsel_101 = _arsel_T_1[101]; // @[OneHot.scala:65:27] wire arsel_102 = _arsel_T_1[102]; // @[OneHot.scala:65:27] wire arsel_103 = _arsel_T_1[103]; // @[OneHot.scala:65:27] wire arsel_104 = _arsel_T_1[104]; // @[OneHot.scala:65:27] wire arsel_105 = _arsel_T_1[105]; // @[OneHot.scala:65:27] wire arsel_106 = _arsel_T_1[106]; // @[OneHot.scala:65:27] wire arsel_107 = _arsel_T_1[107]; // @[OneHot.scala:65:27] wire arsel_108 = _arsel_T_1[108]; // @[OneHot.scala:65:27] wire arsel_109 = _arsel_T_1[109]; // @[OneHot.scala:65:27] wire arsel_110 = _arsel_T_1[110]; // @[OneHot.scala:65:27] wire arsel_111 = _arsel_T_1[111]; // @[OneHot.scala:65:27] wire arsel_112 = _arsel_T_1[112]; // @[OneHot.scala:65:27] wire arsel_113 = _arsel_T_1[113]; // @[OneHot.scala:65:27] wire arsel_114 = _arsel_T_1[114]; // @[OneHot.scala:65:27] wire arsel_115 = _arsel_T_1[115]; // @[OneHot.scala:65:27] wire arsel_116 = _arsel_T_1[116]; // @[OneHot.scala:65:27] wire arsel_117 = _arsel_T_1[117]; // @[OneHot.scala:65:27] wire arsel_118 = _arsel_T_1[118]; // @[OneHot.scala:65:27] wire arsel_119 = _arsel_T_1[119]; // @[OneHot.scala:65:27] wire arsel_120 = _arsel_T_1[120]; // @[OneHot.scala:65:27] wire arsel_121 = _arsel_T_1[121]; // @[OneHot.scala:65:27] wire arsel_122 = _arsel_T_1[122]; // @[OneHot.scala:65:27] wire arsel_123 = _arsel_T_1[123]; // @[OneHot.scala:65:27] wire arsel_124 = _arsel_T_1[124]; // @[OneHot.scala:65:27] wire arsel_125 = _arsel_T_1[125]; // @[OneHot.scala:65:27] wire arsel_126 = _arsel_T_1[126]; // @[OneHot.scala:65:27] 
wire arsel_127 = _arsel_T_1[127]; // @[OneHot.scala:65:27] wire [127:0] _rsel_T = 128'h1 << rsel_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [127:0] _rsel_T_1 = _rsel_T; // @[OneHot.scala:65:{12,27}] wire rsel_0 = _rsel_T_1[0]; // @[OneHot.scala:65:27] wire rsel_1 = _rsel_T_1[1]; // @[OneHot.scala:65:27] wire rsel_2 = _rsel_T_1[2]; // @[OneHot.scala:65:27] wire rsel_3 = _rsel_T_1[3]; // @[OneHot.scala:65:27] wire rsel_4 = _rsel_T_1[4]; // @[OneHot.scala:65:27] wire rsel_5 = _rsel_T_1[5]; // @[OneHot.scala:65:27] wire rsel_6 = _rsel_T_1[6]; // @[OneHot.scala:65:27] wire rsel_7 = _rsel_T_1[7]; // @[OneHot.scala:65:27] wire rsel_8 = _rsel_T_1[8]; // @[OneHot.scala:65:27] wire rsel_9 = _rsel_T_1[9]; // @[OneHot.scala:65:27] wire rsel_10 = _rsel_T_1[10]; // @[OneHot.scala:65:27] wire rsel_11 = _rsel_T_1[11]; // @[OneHot.scala:65:27] wire rsel_12 = _rsel_T_1[12]; // @[OneHot.scala:65:27] wire rsel_13 = _rsel_T_1[13]; // @[OneHot.scala:65:27] wire rsel_14 = _rsel_T_1[14]; // @[OneHot.scala:65:27] wire rsel_15 = _rsel_T_1[15]; // @[OneHot.scala:65:27] wire rsel_16 = _rsel_T_1[16]; // @[OneHot.scala:65:27] wire rsel_17 = _rsel_T_1[17]; // @[OneHot.scala:65:27] wire rsel_18 = _rsel_T_1[18]; // @[OneHot.scala:65:27] wire rsel_19 = _rsel_T_1[19]; // @[OneHot.scala:65:27] wire rsel_20 = _rsel_T_1[20]; // @[OneHot.scala:65:27] wire rsel_21 = _rsel_T_1[21]; // @[OneHot.scala:65:27] wire rsel_22 = _rsel_T_1[22]; // @[OneHot.scala:65:27] wire rsel_23 = _rsel_T_1[23]; // @[OneHot.scala:65:27] wire rsel_24 = _rsel_T_1[24]; // @[OneHot.scala:65:27] wire rsel_25 = _rsel_T_1[25]; // @[OneHot.scala:65:27] wire rsel_26 = _rsel_T_1[26]; // @[OneHot.scala:65:27] wire rsel_27 = _rsel_T_1[27]; // @[OneHot.scala:65:27] wire rsel_28 = _rsel_T_1[28]; // @[OneHot.scala:65:27] wire rsel_29 = _rsel_T_1[29]; // @[OneHot.scala:65:27] wire rsel_30 = _rsel_T_1[30]; // @[OneHot.scala:65:27] wire rsel_31 = _rsel_T_1[31]; // @[OneHot.scala:65:27] wire rsel_32 = _rsel_T_1[32]; // @[OneHot.scala:65:27] wire rsel_33 = _rsel_T_1[33]; // @[OneHot.scala:65:27] wire rsel_34 = _rsel_T_1[34]; // @[OneHot.scala:65:27] wire rsel_35 = _rsel_T_1[35]; // @[OneHot.scala:65:27] wire rsel_36 = _rsel_T_1[36]; // @[OneHot.scala:65:27] wire rsel_37 = _rsel_T_1[37]; // @[OneHot.scala:65:27] wire rsel_38 = _rsel_T_1[38]; // @[OneHot.scala:65:27] wire rsel_39 = _rsel_T_1[39]; // @[OneHot.scala:65:27] wire rsel_40 = _rsel_T_1[40]; // @[OneHot.scala:65:27] wire rsel_41 = _rsel_T_1[41]; // @[OneHot.scala:65:27] wire rsel_42 = _rsel_T_1[42]; // @[OneHot.scala:65:27] wire rsel_43 = _rsel_T_1[43]; // @[OneHot.scala:65:27] wire rsel_44 = _rsel_T_1[44]; // @[OneHot.scala:65:27] wire rsel_45 = _rsel_T_1[45]; // @[OneHot.scala:65:27] wire rsel_46 = _rsel_T_1[46]; // @[OneHot.scala:65:27] wire rsel_47 = _rsel_T_1[47]; // @[OneHot.scala:65:27] wire rsel_48 = _rsel_T_1[48]; // @[OneHot.scala:65:27] wire rsel_49 = _rsel_T_1[49]; // @[OneHot.scala:65:27] wire rsel_50 = _rsel_T_1[50]; // @[OneHot.scala:65:27] wire rsel_51 = _rsel_T_1[51]; // @[OneHot.scala:65:27] wire rsel_52 = _rsel_T_1[52]; // @[OneHot.scala:65:27] wire rsel_53 = _rsel_T_1[53]; // @[OneHot.scala:65:27] wire rsel_54 = _rsel_T_1[54]; // @[OneHot.scala:65:27] wire rsel_55 = _rsel_T_1[55]; // @[OneHot.scala:65:27] wire rsel_56 = _rsel_T_1[56]; // @[OneHot.scala:65:27] wire rsel_57 = _rsel_T_1[57]; // @[OneHot.scala:65:27] wire rsel_58 = _rsel_T_1[58]; // @[OneHot.scala:65:27] wire rsel_59 = _rsel_T_1[59]; // @[OneHot.scala:65:27] wire rsel_60 = _rsel_T_1[60]; // @[OneHot.scala:65:27] wire rsel_61 
= _rsel_T_1[61]; // @[OneHot.scala:65:27] wire rsel_62 = _rsel_T_1[62]; // @[OneHot.scala:65:27] wire rsel_63 = _rsel_T_1[63]; // @[OneHot.scala:65:27] wire rsel_64 = _rsel_T_1[64]; // @[OneHot.scala:65:27] wire rsel_65 = _rsel_T_1[65]; // @[OneHot.scala:65:27] wire rsel_66 = _rsel_T_1[66]; // @[OneHot.scala:65:27] wire rsel_67 = _rsel_T_1[67]; // @[OneHot.scala:65:27] wire rsel_68 = _rsel_T_1[68]; // @[OneHot.scala:65:27] wire rsel_69 = _rsel_T_1[69]; // @[OneHot.scala:65:27] wire rsel_70 = _rsel_T_1[70]; // @[OneHot.scala:65:27] wire rsel_71 = _rsel_T_1[71]; // @[OneHot.scala:65:27] wire rsel_72 = _rsel_T_1[72]; // @[OneHot.scala:65:27] wire rsel_73 = _rsel_T_1[73]; // @[OneHot.scala:65:27] wire rsel_74 = _rsel_T_1[74]; // @[OneHot.scala:65:27] wire rsel_75 = _rsel_T_1[75]; // @[OneHot.scala:65:27] wire rsel_76 = _rsel_T_1[76]; // @[OneHot.scala:65:27] wire rsel_77 = _rsel_T_1[77]; // @[OneHot.scala:65:27] wire rsel_78 = _rsel_T_1[78]; // @[OneHot.scala:65:27] wire rsel_79 = _rsel_T_1[79]; // @[OneHot.scala:65:27] wire rsel_80 = _rsel_T_1[80]; // @[OneHot.scala:65:27] wire rsel_81 = _rsel_T_1[81]; // @[OneHot.scala:65:27] wire rsel_82 = _rsel_T_1[82]; // @[OneHot.scala:65:27] wire rsel_83 = _rsel_T_1[83]; // @[OneHot.scala:65:27] wire rsel_84 = _rsel_T_1[84]; // @[OneHot.scala:65:27] wire rsel_85 = _rsel_T_1[85]; // @[OneHot.scala:65:27] wire rsel_86 = _rsel_T_1[86]; // @[OneHot.scala:65:27] wire rsel_87 = _rsel_T_1[87]; // @[OneHot.scala:65:27] wire rsel_88 = _rsel_T_1[88]; // @[OneHot.scala:65:27] wire rsel_89 = _rsel_T_1[89]; // @[OneHot.scala:65:27] wire rsel_90 = _rsel_T_1[90]; // @[OneHot.scala:65:27] wire rsel_91 = _rsel_T_1[91]; // @[OneHot.scala:65:27] wire rsel_92 = _rsel_T_1[92]; // @[OneHot.scala:65:27] wire rsel_93 = _rsel_T_1[93]; // @[OneHot.scala:65:27] wire rsel_94 = _rsel_T_1[94]; // @[OneHot.scala:65:27] wire rsel_95 = _rsel_T_1[95]; // @[OneHot.scala:65:27] wire rsel_96 = _rsel_T_1[96]; // @[OneHot.scala:65:27] wire rsel_97 = _rsel_T_1[97]; // @[OneHot.scala:65:27] wire rsel_98 = _rsel_T_1[98]; // @[OneHot.scala:65:27] wire rsel_99 = _rsel_T_1[99]; // @[OneHot.scala:65:27] wire rsel_100 = _rsel_T_1[100]; // @[OneHot.scala:65:27] wire rsel_101 = _rsel_T_1[101]; // @[OneHot.scala:65:27] wire rsel_102 = _rsel_T_1[102]; // @[OneHot.scala:65:27] wire rsel_103 = _rsel_T_1[103]; // @[OneHot.scala:65:27] wire rsel_104 = _rsel_T_1[104]; // @[OneHot.scala:65:27] wire rsel_105 = _rsel_T_1[105]; // @[OneHot.scala:65:27] wire rsel_106 = _rsel_T_1[106]; // @[OneHot.scala:65:27] wire rsel_107 = _rsel_T_1[107]; // @[OneHot.scala:65:27] wire rsel_108 = _rsel_T_1[108]; // @[OneHot.scala:65:27] wire rsel_109 = _rsel_T_1[109]; // @[OneHot.scala:65:27] wire rsel_110 = _rsel_T_1[110]; // @[OneHot.scala:65:27] wire rsel_111 = _rsel_T_1[111]; // @[OneHot.scala:65:27] wire rsel_112 = _rsel_T_1[112]; // @[OneHot.scala:65:27] wire rsel_113 = _rsel_T_1[113]; // @[OneHot.scala:65:27] wire rsel_114 = _rsel_T_1[114]; // @[OneHot.scala:65:27] wire rsel_115 = _rsel_T_1[115]; // @[OneHot.scala:65:27] wire rsel_116 = _rsel_T_1[116]; // @[OneHot.scala:65:27] wire rsel_117 = _rsel_T_1[117]; // @[OneHot.scala:65:27] wire rsel_118 = _rsel_T_1[118]; // @[OneHot.scala:65:27] wire rsel_119 = _rsel_T_1[119]; // @[OneHot.scala:65:27] wire rsel_120 = _rsel_T_1[120]; // @[OneHot.scala:65:27] wire rsel_121 = _rsel_T_1[121]; // @[OneHot.scala:65:27] wire rsel_122 = _rsel_T_1[122]; // @[OneHot.scala:65:27] wire rsel_123 = _rsel_T_1[123]; // @[OneHot.scala:65:27] wire rsel_124 = _rsel_T_1[124]; // 
@[OneHot.scala:65:27] wire rsel_125 = _rsel_T_1[125]; // @[OneHot.scala:65:27] wire rsel_126 = _rsel_T_1[126]; // @[OneHot.scala:65:27] wire rsel_127 = _rsel_T_1[127]; // @[OneHot.scala:65:27] wire _T_640 = nodeOut_r_valid & nodeIn_r_ready; // @[UserYanker.scala:78:37] wire _T_643 = nodeIn_ar_valid & nodeOut_ar_ready; // @[UserYanker.scala:81:37] wire _aw_ready_WIRE_0; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_1; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_2; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_3; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_4; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_5; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_6; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_7; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_8; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_9; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_10; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_11; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_12; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_13; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_14; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_15; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_16; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_17; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_18; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_19; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_20; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_21; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_22; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_23; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_24; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_25; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_26; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_27; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_28; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_29; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_30; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_31; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_32; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_33; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_34; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_35; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_36; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_37; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_38; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_39; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_40; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_41; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_42; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_43; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_44; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_45; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_46; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_47; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_48; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_49; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_50; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_51; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_52; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_53; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_54; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_55; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_56; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_57; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_58; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_59; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_60; // 
@[UserYanker.scala:88:29] wire _aw_ready_WIRE_61; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_62; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_63; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_64; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_65; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_66; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_67; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_68; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_69; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_70; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_71; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_72; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_73; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_74; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_75; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_76; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_77; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_78; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_79; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_80; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_81; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_82; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_83; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_84; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_85; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_86; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_87; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_88; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_89; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_90; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_91; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_92; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_93; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_94; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_95; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_96; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_97; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_98; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_99; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_100; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_101; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_102; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_103; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_104; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_105; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_106; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_107; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_108; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_109; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_110; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_111; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_112; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_113; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_114; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_115; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_116; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_117; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_118; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_119; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_120; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_121; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_122; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_123; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_124; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_125; // @[UserYanker.scala:88:29] wire _aw_ready_WIRE_126; // @[UserYanker.scala:88:29] 
wire _aw_ready_WIRE_127; // @[UserYanker.scala:88:29] wire [127:0] _GEN_3 = {{_aw_ready_WIRE_127}, {_aw_ready_WIRE_126}, {_aw_ready_WIRE_125}, {_aw_ready_WIRE_124}, {_aw_ready_WIRE_123}, {_aw_ready_WIRE_122}, {_aw_ready_WIRE_121}, {_aw_ready_WIRE_120}, {_aw_ready_WIRE_119}, {_aw_ready_WIRE_118}, {_aw_ready_WIRE_117}, {_aw_ready_WIRE_116}, {_aw_ready_WIRE_115}, {_aw_ready_WIRE_114}, {_aw_ready_WIRE_113}, {_aw_ready_WIRE_112}, {_aw_ready_WIRE_111}, {_aw_ready_WIRE_110}, {_aw_ready_WIRE_109}, {_aw_ready_WIRE_108}, {_aw_ready_WIRE_107}, {_aw_ready_WIRE_106}, {_aw_ready_WIRE_105}, {_aw_ready_WIRE_104}, {_aw_ready_WIRE_103}, {_aw_ready_WIRE_102}, {_aw_ready_WIRE_101}, {_aw_ready_WIRE_100}, {_aw_ready_WIRE_99}, {_aw_ready_WIRE_98}, {_aw_ready_WIRE_97}, {_aw_ready_WIRE_96}, {_aw_ready_WIRE_95}, {_aw_ready_WIRE_94}, {_aw_ready_WIRE_93}, {_aw_ready_WIRE_92}, {_aw_ready_WIRE_91}, {_aw_ready_WIRE_90}, {_aw_ready_WIRE_89}, {_aw_ready_WIRE_88}, {_aw_ready_WIRE_87}, {_aw_ready_WIRE_86}, {_aw_ready_WIRE_85}, {_aw_ready_WIRE_84}, {_aw_ready_WIRE_83}, {_aw_ready_WIRE_82}, {_aw_ready_WIRE_81}, {_aw_ready_WIRE_80}, {_aw_ready_WIRE_79}, {_aw_ready_WIRE_78}, {_aw_ready_WIRE_77}, {_aw_ready_WIRE_76}, {_aw_ready_WIRE_75}, {_aw_ready_WIRE_74}, {_aw_ready_WIRE_73}, {_aw_ready_WIRE_72}, {_aw_ready_WIRE_71}, {_aw_ready_WIRE_70}, {_aw_ready_WIRE_69}, {_aw_ready_WIRE_68}, {_aw_ready_WIRE_67}, {_aw_ready_WIRE_66}, {_aw_ready_WIRE_65}, {_aw_ready_WIRE_64}, {_aw_ready_WIRE_63}, {_aw_ready_WIRE_62}, {_aw_ready_WIRE_61}, {_aw_ready_WIRE_60}, {_aw_ready_WIRE_59}, {_aw_ready_WIRE_58}, {_aw_ready_WIRE_57}, {_aw_ready_WIRE_56}, {_aw_ready_WIRE_55}, {_aw_ready_WIRE_54}, {_aw_ready_WIRE_53}, {_aw_ready_WIRE_52}, {_aw_ready_WIRE_51}, {_aw_ready_WIRE_50}, {_aw_ready_WIRE_49}, {_aw_ready_WIRE_48}, {_aw_ready_WIRE_47}, {_aw_ready_WIRE_46}, {_aw_ready_WIRE_45}, {_aw_ready_WIRE_44}, {_aw_ready_WIRE_43}, {_aw_ready_WIRE_42}, {_aw_ready_WIRE_41}, {_aw_ready_WIRE_40}, {_aw_ready_WIRE_39}, {_aw_ready_WIRE_38}, {_aw_ready_WIRE_37}, {_aw_ready_WIRE_36}, {_aw_ready_WIRE_35}, {_aw_ready_WIRE_34}, {_aw_ready_WIRE_33}, {_aw_ready_WIRE_32}, {_aw_ready_WIRE_31}, {_aw_ready_WIRE_30}, {_aw_ready_WIRE_29}, {_aw_ready_WIRE_28}, {_aw_ready_WIRE_27}, {_aw_ready_WIRE_26}, {_aw_ready_WIRE_25}, {_aw_ready_WIRE_24}, {_aw_ready_WIRE_23}, {_aw_ready_WIRE_22}, {_aw_ready_WIRE_21}, {_aw_ready_WIRE_20}, {_aw_ready_WIRE_19}, {_aw_ready_WIRE_18}, {_aw_ready_WIRE_17}, {_aw_ready_WIRE_16}, {_aw_ready_WIRE_15}, {_aw_ready_WIRE_14}, {_aw_ready_WIRE_13}, {_aw_ready_WIRE_12}, {_aw_ready_WIRE_11}, {_aw_ready_WIRE_10}, {_aw_ready_WIRE_9}, {_aw_ready_WIRE_8}, {_aw_ready_WIRE_7}, {_aw_ready_WIRE_6}, {_aw_ready_WIRE_5}, {_aw_ready_WIRE_4}, {_aw_ready_WIRE_3}, {_aw_ready_WIRE_2}, {_aw_ready_WIRE_1}, {_aw_ready_WIRE_0}}; // @[UserYanker.scala:88:29, :89:36] assign _nodeIn_aw_ready_T = nodeOut_aw_ready & _GEN_3[nodeIn_aw_bits_id]; // @[UserYanker.scala:89:36] assign nodeIn_aw_ready = _nodeIn_aw_ready_T; // @[UserYanker.scala:89:36] assign _nodeOut_aw_valid_T = nodeIn_aw_valid & _GEN_3[nodeIn_aw_bits_id]; // @[UserYanker.scala:89:36, :90:36] assign nodeOut_aw_valid = _nodeOut_aw_valid_T; // @[UserYanker.scala:90:36] wire _r_valid_WIRE_0; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_1; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_2; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_3; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_4; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_5; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_6; // @[UserYanker.scala:67:28] wire 
_r_valid_WIRE_7; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_8; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_9; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_10; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_11; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_12; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_13; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_14; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_15; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_16; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_17; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_18; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_19; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_20; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_21; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_22; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_23; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_24; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_25; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_26; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_27; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_28; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_29; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_30; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_31; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_32; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_33; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_34; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_35; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_36; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_37; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_38; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_39; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_40; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_41; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_42; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_43; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_44; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_45; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_46; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_47; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_48; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_49; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_50; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_51; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_52; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_53; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_54; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_55; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_56; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_57; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_58; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_59; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_60; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_61; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_62; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_63; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_64; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_65; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_66; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_67; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_68; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_69; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_70; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_71; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_72; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_73; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_74; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_75; // 
@[UserYanker.scala:67:28] wire _r_valid_WIRE_76; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_77; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_78; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_79; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_80; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_81; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_82; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_83; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_84; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_85; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_86; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_87; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_88; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_89; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_90; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_91; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_92; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_93; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_94; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_95; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_96; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_97; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_98; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_99; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_100; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_101; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_102; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_103; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_104; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_105; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_106; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_107; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_108; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_109; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_110; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_111; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_112; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_113; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_114; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_115; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_116; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_117; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_118; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_119; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_120; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_121; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_122; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_123; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_124; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_125; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_126; // @[UserYanker.scala:67:28] wire _r_valid_WIRE_127; // @[UserYanker.scala:67:28] wire _b_valid_WIRE_0; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_1; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_2; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_3; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_4; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_5; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_6; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_7; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_8; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_9; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_10; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_11; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_12; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_13; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_14; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_15; // 
@[UserYanker.scala:96:28] wire _b_valid_WIRE_16; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_17; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_18; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_19; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_20; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_21; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_22; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_23; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_24; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_25; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_26; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_27; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_28; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_29; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_30; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_31; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_32; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_33; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_34; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_35; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_36; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_37; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_38; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_39; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_40; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_41; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_42; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_43; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_44; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_45; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_46; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_47; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_48; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_49; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_50; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_51; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_52; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_53; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_54; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_55; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_56; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_57; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_58; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_59; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_60; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_61; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_62; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_63; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_64; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_65; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_66; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_67; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_68; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_69; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_70; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_71; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_72; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_73; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_74; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_75; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_76; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_77; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_78; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_79; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_80; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_81; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_82; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_83; // 
@[UserYanker.scala:96:28] wire _b_valid_WIRE_84; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_85; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_86; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_87; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_88; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_89; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_90; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_91; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_92; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_93; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_94; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_95; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_96; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_97; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_98; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_99; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_100; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_101; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_102; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_103; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_104; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_105; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_106; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_107; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_108; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_109; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_110; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_111; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_112; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_113; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_114; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_115; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_116; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_117; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_118; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_119; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_120; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_121; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_122; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_123; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_124; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_125; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_126; // @[UserYanker.scala:96:28] wire _b_valid_WIRE_127; // @[UserYanker.scala:96:28]
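// The wire groups above appear to implement the per-ID bookkeeping of the AXI4 UserYanker:
// arsel_*/rsel_* look like one-hot decodes (128'h1 << shiftAmount) of the AR and R transaction
// IDs, while _aw_ready_WIRE_*, _r_valid_WIRE_* and _b_valid_WIRE_* are presumably the
// enqueue-ready / dequeue-valid handshakes of the 128 per-ID user-field queues
// (UserYanker.scala:67, :88, :96) that gate AW acceptance and the recovery of R/B user bits.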
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
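As an illustrative aside on the Misc.scala helpers listed above (not part of any source file here; the module and signal names are invented): DecoupledHelper collects a set of ready/valid terms and lets each endpoint derive its handshake while excluding the term it drives itself, which is how it avoids combinational ready/valid loops. A minimal Chisel sketch:

// Hypothetical example; PassThrough and its ports are not part of the sources above.
import chisel3._
import chisel3.util._
import freechips.rocketchip.util.DecoupledHelper

class PassThrough extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  // The transfer fires only when every term is true; each side excludes its
  // own term when computing the signal it drives.
  val xfer = DecoupledHelper(io.in.valid, io.out.ready)
  io.in.ready  := xfer.fire(io.in.valid)
  io.out.valid := xfer.fire(io.out.ready)
  io.out.bits  := io.in.bits
}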
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Metadata.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
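A short illustrative sketch of how a few of the package.scala UInt helpers above read at a call site (the module and signal names are hypothetical, not taken from the sources):

// Hypothetical example; UIntOpsExample is not part of the sources above.
import chisel3._
import freechips.rocketchip.util._

class UIntOpsExample extends Module {
  val io = IO(new Bundle {
    val x       = Input(UInt(8.W))
    val hit     = Output(Bool())
    val smeared = Output(UInt(8.W))
    val rotated = Output(UInt(8.W))
  })
  io.hit     := io.x.isOneOf(1.U, 2.U, 4.U) // true when x equals any listed value
  io.smeared := leftOR(io.x)                // propagate each set bit toward the MSB
  io.rotated := io.x.rotateRight(3)         // static rotate by three bit positions
}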
package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.constants.MemoryOpConstants import freechips.rocketchip.util._ object ClientStates { val width = 2 def Nothing = 0.U(width.W) def Branch = 1.U(width.W) def Trunk = 2.U(width.W) def Dirty = 3.U(width.W) def hasReadPermission(state: UInt): Bool = state > Nothing def hasWritePermission(state: UInt): Bool = state > Branch } object MemoryOpCategories extends MemoryOpConstants { def wr = Cat(true.B, true.B) // Op actually writes def wi = Cat(false.B, true.B) // Future op will write def rd = Cat(false.B, false.B) // Op only reads def categorize(cmd: UInt): UInt = { val cat = Cat(isWrite(cmd), isWriteIntent(cmd)) //assert(cat.isOneOf(wr,wi,rd), "Could not categorize command.") cat } } /** Stores the client-side coherence information, * such as permissions on the data and whether the data is dirty. * Its API can be used to make TileLink messages in response to * memory operations, cache control oeprations, or Probe messages. */ class ClientMetadata extends Bundle { /** Actual state information stored in this bundle */ val state = UInt(ClientStates.width.W) /** Metadata equality */ def ===(rhs: UInt): Bool = state === rhs def ===(rhs: ClientMetadata): Bool = state === rhs.state def =/=(rhs: ClientMetadata): Bool = !this.===(rhs) /** Is the block's data present in this cache */ def isValid(dummy: Int = 0): Bool = state > ClientStates.Nothing /** Determine whether this cmd misses, and the new state (on hit) or param to be sent (on miss) */ private def growStarter(cmd: UInt): (Bool, UInt) = { import MemoryOpCategories._ import TLPermissions._ import ClientStates._ val c = categorize(cmd) MuxTLookup(Cat(c, state), (false.B, 0.U), Seq( //(effect, am now) -> (was a hit, next) Cat(rd, Dirty) -> (true.B, Dirty), Cat(rd, Trunk) -> (true.B, Trunk), Cat(rd, Branch) -> (true.B, Branch), Cat(wi, Dirty) -> (true.B, Dirty), Cat(wi, Trunk) -> (true.B, Trunk), Cat(wr, Dirty) -> (true.B, Dirty), Cat(wr, Trunk) -> (true.B, Dirty), //(effect, am now) -> (was a miss, param) Cat(rd, Nothing) -> (false.B, NtoB), Cat(wi, Branch) -> (false.B, BtoT), Cat(wi, Nothing) -> (false.B, NtoT), Cat(wr, Branch) -> (false.B, BtoT), Cat(wr, Nothing) -> (false.B, NtoT))) } /** Determine what state to go to after miss based on Grant param * For now, doesn't depend on state (which may have been Probed). */ private def growFinisher(cmd: UInt, param: UInt): UInt = { import MemoryOpCategories._ import TLPermissions._ import ClientStates._ val c = categorize(cmd) //assert(c === rd || param === toT, "Client was expecting trunk permissions.") MuxLookup(Cat(c, param), Nothing)(Seq( //(effect param) -> (next) Cat(rd, toB) -> Branch, Cat(rd, toT) -> Trunk, Cat(wi, toT) -> Trunk, Cat(wr, toT) -> Dirty)) } /** Does this cache have permissions on this block sufficient to perform op, * and what to do next (Acquire message param or updated metadata). 
*/ def onAccess(cmd: UInt): (Bool, UInt, ClientMetadata) = { val r = growStarter(cmd) (r._1, r._2, ClientMetadata(r._2)) } /** Does a secondary miss on the block require another Acquire message */ def onSecondaryAccess(first_cmd: UInt, second_cmd: UInt): (Bool, Bool, UInt, ClientMetadata, UInt) = { import MemoryOpCategories._ val r1 = growStarter(first_cmd) val r2 = growStarter(second_cmd) val needs_second_acq = isWriteIntent(second_cmd) && !isWriteIntent(first_cmd) val hit_again = r1._1 && r2._1 val dirties = categorize(second_cmd) === wr val biggest_grow_param = Mux(dirties, r2._2, r1._2) val dirtiest_state = ClientMetadata(biggest_grow_param) val dirtiest_cmd = Mux(dirties, second_cmd, first_cmd) (needs_second_acq, hit_again, biggest_grow_param, dirtiest_state, dirtiest_cmd) } /** Metadata change on a returned Grant */ def onGrant(cmd: UInt, param: UInt): ClientMetadata = ClientMetadata(growFinisher(cmd, param)) /** Determine what state to go to based on Probe param */ private def shrinkHelper(param: UInt): (Bool, UInt, UInt) = { import ClientStates._ import TLPermissions._ MuxTLookup(Cat(param, state), (false.B, 0.U, 0.U), Seq( //(wanted, am now) -> (hasDirtyData resp, next) Cat(toT, Dirty) -> (true.B, TtoT, Trunk), Cat(toT, Trunk) -> (false.B, TtoT, Trunk), Cat(toT, Branch) -> (false.B, BtoB, Branch), Cat(toT, Nothing) -> (false.B, NtoN, Nothing), Cat(toB, Dirty) -> (true.B, TtoB, Branch), Cat(toB, Trunk) -> (false.B, TtoB, Branch), // Policy: Don't notify on clean downgrade Cat(toB, Branch) -> (false.B, BtoB, Branch), Cat(toB, Nothing) -> (false.B, NtoN, Nothing), Cat(toN, Dirty) -> (true.B, TtoN, Nothing), Cat(toN, Trunk) -> (false.B, TtoN, Nothing), // Policy: Don't notify on clean downgrade Cat(toN, Branch) -> (false.B, BtoN, Nothing), // Policy: Don't notify on clean downgrade Cat(toN, Nothing) -> (false.B, NtoN, Nothing))) } /** Translate cache control cmds into Probe param */ private def cmdToPermCap(cmd: UInt): UInt = { import MemoryOpCategories._ import TLPermissions._ MuxLookup(cmd, toN)(Seq( M_FLUSH -> toN, M_PRODUCE -> toB, M_CLEAN -> toT)) } def onCacheControl(cmd: UInt): (Bool, UInt, ClientMetadata) = { val r = shrinkHelper(cmdToPermCap(cmd)) (r._1, r._2, ClientMetadata(r._3)) } def onProbe(param: UInt): (Bool, UInt, ClientMetadata) = { val r = shrinkHelper(param) (r._1, r._2, ClientMetadata(r._3)) } } /** Factories for ClientMetadata, including on reset */ object ClientMetadata { def apply(perm: UInt) = { val meta = Wire(new ClientMetadata) meta.state := perm meta } def onReset = ClientMetadata(ClientStates.Nothing) def maximum = ClientMetadata(ClientStates.Dirty) } File Consts.scala: // See LICENSE.Berkeley for license details. 
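To make the Metadata.scala permission logic above concrete, here is a hedged sketch of how a cache controller might consult ClientMetadata on an access (the module name, port names, and the 3-bit param width are assumptions, not taken from the sources):

// Hypothetical example; AccessCheck is not part of the sources above.
import chisel3._
import freechips.rocketchip.tilelink.ClientMetadata

class AccessCheck extends Module {
  val io = IO(new Bundle {
    val cmd       = Input(UInt(5.W))          // an M_* memory op code (see Consts.scala below)
    val meta      = Input(new ClientMetadata)
    val wasHit    = Output(Bool())
    val growParam = Output(UInt(3.W))         // TLPermissions param to Acquire with on a miss
    val newMeta   = Output(new ClientMetadata)
  })
  // onAccess wraps growStarter: a hit/miss decision plus either the next local
  // state (on a hit) or the permission-grow parameter to request (on a miss).
  val (wasHit, growParam, newMeta) = io.meta.onAccess(io.cmd)
  io.wasHit    := wasHit
  io.growParam := growParam
  io.newMeta   := newMeta
}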
package freechips.rocketchip.rocket.constants import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ trait ScalarOpConstants { val SZ_BR = 3 def BR_X = BitPat("b???") def BR_EQ = 0.U(3.W) def BR_NE = 1.U(3.W) def BR_J = 2.U(3.W) def BR_N = 3.U(3.W) def BR_LT = 4.U(3.W) def BR_GE = 5.U(3.W) def BR_LTU = 6.U(3.W) def BR_GEU = 7.U(3.W) def A1_X = BitPat("b??") def A1_ZERO = 0.U(2.W) def A1_RS1 = 1.U(2.W) def A1_PC = 2.U(2.W) def A1_RS1SHL = 3.U(2.W) def IMM_X = BitPat("b???") def IMM_S = 0.U(3.W) def IMM_SB = 1.U(3.W) def IMM_U = 2.U(3.W) def IMM_UJ = 3.U(3.W) def IMM_I = 4.U(3.W) def IMM_Z = 5.U(3.W) def A2_X = BitPat("b???") def A2_ZERO = 0.U(3.W) def A2_SIZE = 1.U(3.W) def A2_RS2 = 2.U(3.W) def A2_IMM = 3.U(3.W) def A2_RS2OH = 4.U(3.W) def A2_IMMOH = 5.U(3.W) def X = BitPat("b?") def N = BitPat("b0") def Y = BitPat("b1") val SZ_DW = 1 def DW_X = X def DW_32 = false.B def DW_64 = true.B def DW_XPR = DW_64 } trait MemoryOpConstants { val NUM_XA_OPS = 9 val M_SZ = 5 def M_X = BitPat("b?????"); def M_XRD = "b00000".U; // int load def M_XWR = "b00001".U; // int store def M_PFR = "b00010".U; // prefetch with intent to read def M_PFW = "b00011".U; // prefetch with intent to write def M_XA_SWAP = "b00100".U def M_FLUSH_ALL = "b00101".U // flush all lines def M_XLR = "b00110".U def M_XSC = "b00111".U def M_XA_ADD = "b01000".U def M_XA_XOR = "b01001".U def M_XA_OR = "b01010".U def M_XA_AND = "b01011".U def M_XA_MIN = "b01100".U def M_XA_MAX = "b01101".U def M_XA_MINU = "b01110".U def M_XA_MAXU = "b01111".U def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions def M_PWR = "b10001".U // partial (masked) store def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions def M_SFENCE = "b10100".U // SFENCE.VMA def M_HFENCEV = "b10101".U // HFENCE.VVMA def M_HFENCEG = "b10110".U // HFENCE.GVMA def M_WOK = "b10111".U // check write permissions but don't perform a write def M_HLVX = "b10000".U // HLVX instruction def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND) def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU) def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd) def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd) def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd) def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR } File Parameters.scala: // See LICENSE.SiFive for license details. 
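As a brief illustration of the Consts.scala memory-op predicates above (the module and port names are invented for this sketch):

// Hypothetical example; MemOpDecode is not part of the sources above.
import chisel3._
import freechips.rocketchip.rocket.constants.MemoryOpConstants

class MemOpDecode extends Module with MemoryOpConstants {
  val io = IO(new Bundle {
    val cmd        = Input(UInt(M_SZ.W))
    val readsData  = Output(Bool())
    val writesData = Output(Bool())
    val atomic     = Output(Bool())
  })
  // The predicates compose: every AMO is both a read and a write, and M_XSC
  // (store-conditional) is also reported as a read by isRead.
  io.readsData  := isRead(io.cmd)
  io.writesData := isWrite(io.cmd)
  io.atomic     := isAMO(io.cmd)
}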
package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { 
log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def 
toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if 
(isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File mshrs.scala: //****************************************************************************** // Ported from Rocket-Chip // See LICENSE.Berkeley and LICENSE.SiFive in Rocket-Chip for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.lsu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.tile._ import freechips.rocketchip.util._ import freechips.rocketchip.rocket._ import boom.v3.common._ import boom.v3.exu.BrUpdateInfo import boom.v3.util.{IsKilledByBranch, GetNewBrMask, BranchKillableQueue, IsOlder, UpdateBrMask, AgePriorityEncoder, WrapInc} class BoomDCacheReqInternal(implicit p: Parameters) extends BoomDCacheReq()(p) with HasL1HellaCacheParameters { // miss info val tag_match = Bool() val old_meta = new L1Metadata val way_en = UInt(nWays.W) // Used in the MSHRs val sdq_id = UInt(log2Ceil(cfg.nSDQ).W) } class BoomMSHR(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p) with HasL1HellaCacheParameters { val io = IO(new Bundle { val id = Input(UInt()) val req_pri_val = Input(Bool()) val req_pri_rdy = Output(Bool()) val req_sec_val = Input(Bool()) val req_sec_rdy = Output(Bool()) val clear_prefetch = Input(Bool()) val brupdate = Input(new BrUpdateInfo) val exception = Input(Bool()) val rob_pnr_idx = Input(UInt(robAddrSz.W)) val rob_head_idx = Input(UInt(robAddrSz.W)) val req = Input(new BoomDCacheReqInternal) val req_is_probe = Input(Bool()) val idx = Output(Valid(UInt())) val way = Output(Valid(UInt())) val tag = Output(Valid(UInt())) val mem_acquire = Decoupled(new TLBundleA(edge.bundle)) val mem_grant = Flipped(Decoupled(new TLBundleD(edge.bundle))) val mem_finish = Decoupled(new TLBundleE(edge.bundle)) val prober_state = Input(Valid(UInt(coreMaxAddrBits.W))) val refill = Decoupled(new L1DataWriteReq) val meta_write = Decoupled(new L1MetaWriteReq) val meta_read = Decoupled(new L1MetaReadReq) val meta_resp = Input(Valid(new L1Metadata)) val wb_req = Decoupled(new WritebackReq(edge.bundle)) // To inform the prefetcher when we are commiting the fetch of this line val commit_val = Output(Bool()) val commit_addr = Output(UInt(coreMaxAddrBits.W)) val commit_coh = Output(new ClientMetadata) // Reading from the line buffer val lb_read = Decoupled(new LineBufferReadReq) val lb_resp = Input(UInt(encRowBits.W)) val lb_write = Decoupled(new LineBufferWriteReq) // Replays go through the cache pipeline again val replay = Decoupled(new BoomDCacheReqInternal) // Resp go straight out to the core val resp = Decoupled(new BoomDCacheResp) // Writeback unit tells us when it is done processing our wb val wb_resp = Input(Bool()) val probe_rdy = Output(Bool()) }) // TODO: Optimize this. 
We don't want to mess with cache during speculation // s_refill_req : Make a request for a new cache line // s_refill_resp : Store the refill response into our buffer // s_drain_rpq_loads : Drain out loads from the rpq // : If miss was misspeculated, go to s_invalid // s_wb_req : Write back the evicted cache line // s_wb_resp : Finish writing back the evicted cache line // s_meta_write_req : Write the metadata for new cache lne // s_meta_write_resp : val s_invalid :: s_refill_req :: s_refill_resp :: s_drain_rpq_loads :: s_meta_read :: s_meta_resp_1 :: s_meta_resp_2 :: s_meta_clear :: s_wb_meta_read :: s_wb_req :: s_wb_resp :: s_commit_line :: s_drain_rpq :: s_meta_write_req :: s_mem_finish_1 :: s_mem_finish_2 :: s_prefetched :: s_prefetch :: Nil = Enum(18) val state = RegInit(s_invalid) val req = Reg(new BoomDCacheReqInternal) val req_idx = req.addr(untagBits-1, blockOffBits) val req_tag = req.addr >> untagBits val req_block_addr = (req.addr >> blockOffBits) << blockOffBits val req_needs_wb = RegInit(false.B) val new_coh = RegInit(ClientMetadata.onReset) val (_, shrink_param, coh_on_clear) = req.old_meta.coh.onCacheControl(M_FLUSH) val grow_param = new_coh.onAccess(req.uop.mem_cmd)._2 val coh_on_grant = new_coh.onGrant(req.uop.mem_cmd, io.mem_grant.bits.param) // We only accept secondary misses if the original request had sufficient permissions val (cmd_requires_second_acquire, is_hit_again, _, dirtier_coh, dirtier_cmd) = new_coh.onSecondaryAccess(req.uop.mem_cmd, io.req.uop.mem_cmd) val (_, _, refill_done, refill_address_inc) = edge.addr_inc(io.mem_grant) val sec_rdy = (!cmd_requires_second_acquire && !io.req_is_probe && !state.isOneOf(s_invalid, s_meta_write_req, s_mem_finish_1, s_mem_finish_2))// Always accept secondary misses val rpq = Module(new BranchKillableQueue(new BoomDCacheReqInternal, cfg.nRPQ, u => u.uses_ldq, false)) rpq.io.brupdate := io.brupdate rpq.io.flush := io.exception assert(!(state === s_invalid && !rpq.io.empty)) rpq.io.enq.valid := ((io.req_pri_val && io.req_pri_rdy) || (io.req_sec_val && io.req_sec_rdy)) && !isPrefetch(io.req.uop.mem_cmd) rpq.io.enq.bits := io.req rpq.io.deq.ready := false.B val grantack = Reg(Valid(new TLBundleE(edge.bundle))) val refill_ctr = Reg(UInt(log2Ceil(cacheDataBeats).W)) val commit_line = Reg(Bool()) val grant_had_data = Reg(Bool()) val finish_to_prefetch = Reg(Bool()) // Block probes if a tag write we started is still in the pipeline val meta_hazard = RegInit(0.U(2.W)) when (meta_hazard =/= 0.U) { meta_hazard := meta_hazard + 1.U } when (io.meta_write.fire) { meta_hazard := 1.U } io.probe_rdy := (meta_hazard === 0.U && (state.isOneOf(s_invalid, s_refill_req, s_refill_resp, s_drain_rpq_loads) || (state === s_meta_read && grantack.valid))) io.idx.valid := state =/= s_invalid io.tag.valid := state =/= s_invalid io.way.valid := !state.isOneOf(s_invalid, s_prefetch) io.idx.bits := req_idx io.tag.bits := req_tag io.way.bits := req.way_en io.meta_write.valid := false.B io.meta_write.bits := DontCare io.req_pri_rdy := false.B io.req_sec_rdy := sec_rdy && rpq.io.enq.ready io.mem_acquire.valid := false.B io.mem_acquire.bits := DontCare io.refill.valid := false.B io.refill.bits := DontCare io.replay.valid := false.B io.replay.bits := DontCare io.wb_req.valid := false.B io.wb_req.bits := DontCare io.resp.valid := false.B io.resp.bits := DontCare io.commit_val := false.B io.commit_addr := req.addr io.commit_coh := coh_on_grant io.meta_read.valid := false.B io.meta_read.bits := DontCare io.mem_finish.valid := false.B io.mem_finish.bits := DontCare 
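  // Default drives: every decoupled output above starts deasserted (or DontCare) each
  // cycle; the state-machine cases below override only the signals that matter in the
  // state this MSHR currently occupies.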
io.lb_write.valid := false.B io.lb_write.bits := DontCare io.lb_read.valid := false.B io.lb_read.bits := DontCare io.mem_grant.ready := false.B when (io.req_sec_val && io.req_sec_rdy) { req.uop.mem_cmd := dirtier_cmd when (is_hit_again) { new_coh := dirtier_coh } } def handle_pri_req(old_state: UInt): UInt = { val new_state = WireInit(old_state) grantack.valid := false.B refill_ctr := 0.U assert(rpq.io.enq.ready) req := io.req val old_coh = io.req.old_meta.coh req_needs_wb := old_coh.onCacheControl(M_FLUSH)._1 // does the line we are evicting need to be written back when (io.req.tag_match) { val (is_hit, _, coh_on_hit) = old_coh.onAccess(io.req.uop.mem_cmd) when (is_hit) { // set dirty bit assert(isWrite(io.req.uop.mem_cmd)) new_coh := coh_on_hit new_state := s_drain_rpq } .otherwise { // upgrade permissions new_coh := old_coh new_state := s_refill_req } } .otherwise { // refill and writeback if necessary new_coh := ClientMetadata.onReset new_state := s_refill_req } new_state } when (state === s_invalid) { io.req_pri_rdy := true.B grant_had_data := false.B when (io.req_pri_val && io.req_pri_rdy) { state := handle_pri_req(state) } } .elsewhen (state === s_refill_req) { io.mem_acquire.valid := true.B // TODO: Use AcquirePerm if just doing permissions acquire io.mem_acquire.bits := edge.AcquireBlock( fromSource = io.id, toAddress = Cat(req_tag, req_idx) << blockOffBits, lgSize = lgCacheBlockBytes.U, growPermissions = grow_param)._2 when (io.mem_acquire.fire) { state := s_refill_resp } } .elsewhen (state === s_refill_resp) { when (edge.hasData(io.mem_grant.bits)) { io.mem_grant.ready := io.lb_write.ready io.lb_write.valid := io.mem_grant.valid io.lb_write.bits.id := io.id io.lb_write.bits.offset := refill_address_inc >> rowOffBits io.lb_write.bits.data := io.mem_grant.bits.data } .otherwise { io.mem_grant.ready := true.B } when (io.mem_grant.fire) { grant_had_data := edge.hasData(io.mem_grant.bits) } when (refill_done) { grantack.valid := edge.isRequest(io.mem_grant.bits) grantack.bits := edge.GrantAck(io.mem_grant.bits) state := Mux(grant_had_data, s_drain_rpq_loads, s_drain_rpq) assert(!(!grant_had_data && req_needs_wb)) commit_line := false.B new_coh := coh_on_grant } } .elsewhen (state === s_drain_rpq_loads) { val drain_load = (isRead(rpq.io.deq.bits.uop.mem_cmd) && !isWrite(rpq.io.deq.bits.uop.mem_cmd) && (rpq.io.deq.bits.uop.mem_cmd =/= M_XLR)) // LR should go through replay // drain all loads for now val rp_addr = Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)) val word_idx = if (rowWords == 1) 0.U else rp_addr(log2Up(rowWords*coreDataBytes)-1, log2Up(wordBytes)) val data = io.lb_resp val data_word = data >> Cat(word_idx, 0.U(log2Up(coreDataBits).W)) val loadgen = new LoadGen(rpq.io.deq.bits.uop.mem_size, rpq.io.deq.bits.uop.mem_signed, Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)), data_word, false.B, wordBytes) rpq.io.deq.ready := io.resp.ready && io.lb_read.ready && drain_load io.lb_read.valid := rpq.io.deq.valid && drain_load io.lb_read.bits.id := io.id io.lb_read.bits.offset := rpq.io.deq.bits.addr >> rowOffBits io.resp.valid := rpq.io.deq.valid && io.lb_read.fire && drain_load io.resp.bits.uop := rpq.io.deq.bits.uop io.resp.bits.data := loadgen.data io.resp.bits.is_hella := rpq.io.deq.bits.is_hella when (rpq.io.deq.fire) { commit_line := true.B } .elsewhen (rpq.io.empty && !commit_line) { when (!rpq.io.enq.fire) { state := s_mem_finish_1 finish_to_prefetch := enablePrefetching.B } } .elsewhen (rpq.io.empty || (rpq.io.deq.valid && !drain_load)) { // 
io.commit_val is for the prefetcher. it tells the prefetcher that this line was correctly acquired // The prefetcher should consider fetching the next line io.commit_val := true.B state := s_meta_read } } .elsewhen (state === s_meta_read) { io.meta_read.valid := !io.prober_state.valid || !grantack.valid || (io.prober_state.bits(untagBits-1,blockOffBits) =/= req_idx) io.meta_read.bits.idx := req_idx io.meta_read.bits.tag := req_tag io.meta_read.bits.way_en := req.way_en when (io.meta_read.fire) { state := s_meta_resp_1 } } .elsewhen (state === s_meta_resp_1) { state := s_meta_resp_2 } .elsewhen (state === s_meta_resp_2) { val needs_wb = io.meta_resp.bits.coh.onCacheControl(M_FLUSH)._1 state := Mux(!io.meta_resp.valid, s_meta_read, // Prober could have nack'd this read Mux(needs_wb, s_meta_clear, s_commit_line)) } .elsewhen (state === s_meta_clear) { io.meta_write.valid := true.B io.meta_write.bits.idx := req_idx io.meta_write.bits.data.coh := coh_on_clear io.meta_write.bits.data.tag := req_tag io.meta_write.bits.way_en := req.way_en when (io.meta_write.fire) { state := s_wb_req } } .elsewhen (state === s_wb_req) { io.wb_req.valid := true.B io.wb_req.bits.tag := req.old_meta.tag io.wb_req.bits.idx := req_idx io.wb_req.bits.param := shrink_param io.wb_req.bits.way_en := req.way_en io.wb_req.bits.source := io.id io.wb_req.bits.voluntary := true.B when (io.wb_req.fire) { state := s_wb_resp } } .elsewhen (state === s_wb_resp) { when (io.wb_resp) { state := s_commit_line } } .elsewhen (state === s_commit_line) { io.lb_read.valid := true.B io.lb_read.bits.id := io.id io.lb_read.bits.offset := refill_ctr io.refill.valid := io.lb_read.fire io.refill.bits.addr := req_block_addr | (refill_ctr << rowOffBits) io.refill.bits.way_en := req.way_en io.refill.bits.wmask := ~(0.U(rowWords.W)) io.refill.bits.data := io.lb_resp when (io.refill.fire) { refill_ctr := refill_ctr + 1.U when (refill_ctr === (cacheDataBeats - 1).U) { state := s_drain_rpq } } } .elsewhen (state === s_drain_rpq) { io.replay <> rpq.io.deq io.replay.bits.way_en := req.way_en io.replay.bits.addr := Cat(req_tag, req_idx, rpq.io.deq.bits.addr(blockOffBits-1,0)) when (io.replay.fire && isWrite(rpq.io.deq.bits.uop.mem_cmd)) { // Set dirty bit val (is_hit, _, coh_on_hit) = new_coh.onAccess(rpq.io.deq.bits.uop.mem_cmd) assert(is_hit, "We still don't have permissions for this store") new_coh := coh_on_hit } when (rpq.io.empty && !rpq.io.enq.valid) { state := s_meta_write_req } } .elsewhen (state === s_meta_write_req) { io.meta_write.valid := true.B io.meta_write.bits.idx := req_idx io.meta_write.bits.data.coh := new_coh io.meta_write.bits.data.tag := req_tag io.meta_write.bits.way_en := req.way_en when (io.meta_write.fire) { state := s_mem_finish_1 finish_to_prefetch := false.B } } .elsewhen (state === s_mem_finish_1) { io.mem_finish.valid := grantack.valid io.mem_finish.bits := grantack.bits when (io.mem_finish.fire || !grantack.valid) { grantack.valid := false.B state := s_mem_finish_2 } } .elsewhen (state === s_mem_finish_2) { state := Mux(finish_to_prefetch, s_prefetch, s_invalid) } .elsewhen (state === s_prefetch) { io.req_pri_rdy := true.B when ((io.req_sec_val && !io.req_sec_rdy) || io.clear_prefetch) { state := s_invalid } .elsewhen (io.req_sec_val && io.req_sec_rdy) { val (is_hit, _, coh_on_hit) = new_coh.onAccess(io.req.uop.mem_cmd) when (is_hit) { // Proceed with refill new_coh := coh_on_hit state := s_meta_read } .otherwise { // Reacquire this line new_coh := ClientMetadata.onReset state := s_refill_req } } .elsewhen (io.req_pri_val 
&& io.req_pri_rdy) { grant_had_data := false.B state := handle_pri_req(state) } } } class BoomIOMSHR(id: Int)(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p) with HasL1HellaCacheParameters { val io = IO(new Bundle { val req = Flipped(Decoupled(new BoomDCacheReq)) val resp = Decoupled(new BoomDCacheResp) val mem_access = Decoupled(new TLBundleA(edge.bundle)) val mem_ack = Flipped(Valid(new TLBundleD(edge.bundle))) // We don't need brupdate in here because uncacheable operations are guaranteed non-speculative }) def beatOffset(addr: UInt) = addr.extract(beatOffBits-1, wordOffBits) def wordFromBeat(addr: UInt, dat: UInt) = { val shift = Cat(beatOffset(addr), 0.U((wordOffBits+log2Ceil(wordBytes)).W)) (dat >> shift)(wordBits-1, 0) } val req = Reg(new BoomDCacheReq) val grant_word = Reg(UInt(wordBits.W)) val s_idle :: s_mem_access :: s_mem_ack :: s_resp :: Nil = Enum(4) val state = RegInit(s_idle) io.req.ready := state === s_idle val loadgen = new LoadGen(req.uop.mem_size, req.uop.mem_signed, req.addr, grant_word, false.B, wordBytes) val a_source = id.U val a_address = req.addr val a_size = req.uop.mem_size val a_data = Fill(beatWords, req.data) val get = edge.Get(a_source, a_address, a_size)._2 val put = edge.Put(a_source, a_address, a_size, a_data)._2 val atomics = if (edge.manager.anySupportLogical) { MuxLookup(req.uop.mem_cmd, (0.U).asTypeOf(new TLBundleA(edge.bundle)))(Array( M_XA_SWAP -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.SWAP)._2, M_XA_XOR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.XOR) ._2, M_XA_OR -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.OR) ._2, M_XA_AND -> edge.Logical(a_source, a_address, a_size, a_data, TLAtomics.AND) ._2, M_XA_ADD -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.ADD)._2, M_XA_MIN -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MIN)._2, M_XA_MAX -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAX)._2, M_XA_MINU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MINU)._2, M_XA_MAXU -> edge.Arithmetic(a_source, a_address, a_size, a_data, TLAtomics.MAXU)._2)) } else { // If no managers support atomics, assert fail if processor asks for them assert(state === s_idle || !isAMO(req.uop.mem_cmd)) (0.U).asTypeOf(new TLBundleA(edge.bundle)) } assert(state === s_idle || req.uop.mem_cmd =/= M_XSC) io.mem_access.valid := state === s_mem_access io.mem_access.bits := Mux(isAMO(req.uop.mem_cmd), atomics, Mux(isRead(req.uop.mem_cmd), get, put)) val send_resp = isRead(req.uop.mem_cmd) io.resp.valid := (state === s_resp) && send_resp io.resp.bits.is_hella := req.is_hella io.resp.bits.uop := req.uop io.resp.bits.data := loadgen.data when (io.req.fire) { req := io.req.bits state := s_mem_access } when (io.mem_access.fire) { state := s_mem_ack } when (state === s_mem_ack && io.mem_ack.valid) { state := s_resp when (isRead(req.uop.mem_cmd)) { grant_word := wordFromBeat(req.addr, io.mem_ack.bits.data) } } when (state === s_resp) { when (!send_resp || io.resp.fire) { state := s_idle } } } class LineBufferReadReq(implicit p: Parameters) extends BoomBundle()(p) with HasL1HellaCacheParameters { val id = UInt(log2Ceil(nLBEntries).W) val offset = UInt(log2Ceil(cacheDataBeats).W) def lb_addr = Cat(id, offset) } class LineBufferWriteReq(implicit p: Parameters) extends LineBufferReadReq()(p) { val data = UInt(encRowBits.W) } class LineBufferMetaWriteReq(implicit p: Parameters) extends BoomBundle()(p) { val id = UInt(log2Ceil(nLBEntries).W) val coh = 
new ClientMetadata val addr = UInt(coreMaxAddrBits.W) } class LineBufferMeta(implicit p: Parameters) extends BoomBundle()(p) with HasL1HellaCacheParameters { val coh = new ClientMetadata val addr = UInt(coreMaxAddrBits.W) } class BoomMSHRFile(implicit edge: TLEdgeOut, p: Parameters) extends BoomModule()(p) with HasL1HellaCacheParameters { val io = IO(new Bundle { val req = Flipped(Vec(memWidth, Decoupled(new BoomDCacheReqInternal))) // Req from s2 of DCache pipe val req_is_probe = Input(Vec(memWidth, Bool())) val resp = Decoupled(new BoomDCacheResp) val secondary_miss = Output(Vec(memWidth, Bool())) val block_hit = Output(Vec(memWidth, Bool())) val brupdate = Input(new BrUpdateInfo) val exception = Input(Bool()) val rob_pnr_idx = Input(UInt(robAddrSz.W)) val rob_head_idx = Input(UInt(robAddrSz.W)) val mem_acquire = Decoupled(new TLBundleA(edge.bundle)) val mem_grant = Flipped(Decoupled(new TLBundleD(edge.bundle))) val mem_finish = Decoupled(new TLBundleE(edge.bundle)) val refill = Decoupled(new L1DataWriteReq) val meta_write = Decoupled(new L1MetaWriteReq) val meta_read = Decoupled(new L1MetaReadReq) val meta_resp = Input(Valid(new L1Metadata)) val replay = Decoupled(new BoomDCacheReqInternal) val prefetch = Decoupled(new BoomDCacheReq) val wb_req = Decoupled(new WritebackReq(edge.bundle)) val prober_state = Input(Valid(UInt(coreMaxAddrBits.W))) val clear_all = Input(Bool()) // Clears all uncommitted MSHRs to prepare for fence val wb_resp = Input(Bool()) val fence_rdy = Output(Bool()) val probe_rdy = Output(Bool()) }) val req_idx = OHToUInt(io.req.map(_.valid)) val req = io.req(req_idx) val req_is_probe = io.req_is_probe(0) for (w <- 0 until memWidth) io.req(w).ready := false.B val prefetcher: DataPrefetcher = if (enablePrefetching) Module(new NLPrefetcher) else Module(new NullPrefetcher) io.prefetch <> prefetcher.io.prefetch val cacheable = edge.manager.supportsAcquireBFast(req.bits.addr, lgCacheBlockBytes.U) // -------------------- // The MSHR SDQ val sdq_val = RegInit(0.U(cfg.nSDQ.W)) val sdq_alloc_id = PriorityEncoder(~sdq_val(cfg.nSDQ-1,0)) val sdq_rdy = !sdq_val.andR val sdq_enq = req.fire && cacheable && isWrite(req.bits.uop.mem_cmd) val sdq = Mem(cfg.nSDQ, UInt(coreDataBits.W)) when (sdq_enq) { sdq(sdq_alloc_id) := req.bits.data } // -------------------- // The LineBuffer Data // Holds refilling lines, prefetched lines val lb = Mem(nLBEntries * cacheDataBeats, UInt(encRowBits.W)) val lb_read_arb = Module(new Arbiter(new LineBufferReadReq, cfg.nMSHRs)) val lb_write_arb = Module(new Arbiter(new LineBufferWriteReq, cfg.nMSHRs)) lb_read_arb.io.out.ready := false.B lb_write_arb.io.out.ready := true.B val lb_read_data = WireInit(0.U(encRowBits.W)) when (lb_write_arb.io.out.fire) { lb.write(lb_write_arb.io.out.bits.lb_addr, lb_write_arb.io.out.bits.data) } .otherwise { lb_read_arb.io.out.ready := true.B when (lb_read_arb.io.out.fire) { lb_read_data := lb.read(lb_read_arb.io.out.bits.lb_addr) } } def widthMap[T <: Data](f: Int => T) = VecInit((0 until memWidth).map(f)) val idx_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool()))) val tag_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool()))) val way_matches = Wire(Vec(memWidth, Vec(cfg.nMSHRs, Bool()))) val tag_match = widthMap(w => Mux1H(idx_matches(w), tag_matches(w))) val idx_match = widthMap(w => idx_matches(w).reduce(_||_)) val way_match = widthMap(w => Mux1H(idx_matches(w), way_matches(w))) val wb_tag_list = Wire(Vec(cfg.nMSHRs, UInt(tagBits.W))) val meta_write_arb = Module(new Arbiter(new L1MetaWriteReq , cfg.nMSHRs)) val 
meta_read_arb = Module(new Arbiter(new L1MetaReadReq , cfg.nMSHRs)) val wb_req_arb = Module(new Arbiter(new WritebackReq(edge.bundle), cfg.nMSHRs)) val replay_arb = Module(new Arbiter(new BoomDCacheReqInternal , cfg.nMSHRs)) val resp_arb = Module(new Arbiter(new BoomDCacheResp , cfg.nMSHRs + nIOMSHRs)) val refill_arb = Module(new Arbiter(new L1DataWriteReq , cfg.nMSHRs)) val commit_vals = Wire(Vec(cfg.nMSHRs, Bool())) val commit_addrs = Wire(Vec(cfg.nMSHRs, UInt(coreMaxAddrBits.W))) val commit_cohs = Wire(Vec(cfg.nMSHRs, new ClientMetadata)) var sec_rdy = false.B io.fence_rdy := true.B io.probe_rdy := true.B io.mem_grant.ready := false.B val mshr_alloc_idx = Wire(UInt()) val pri_rdy = WireInit(false.B) val pri_val = req.valid && sdq_rdy && cacheable && !idx_match(req_idx) val mshrs = (0 until cfg.nMSHRs) map { i => val mshr = Module(new BoomMSHR) mshr.io.id := i.U(log2Ceil(cfg.nMSHRs).W) for (w <- 0 until memWidth) { idx_matches(w)(i) := mshr.io.idx.valid && mshr.io.idx.bits === io.req(w).bits.addr(untagBits-1,blockOffBits) tag_matches(w)(i) := mshr.io.tag.valid && mshr.io.tag.bits === io.req(w).bits.addr >> untagBits way_matches(w)(i) := mshr.io.way.valid && mshr.io.way.bits === io.req(w).bits.way_en } wb_tag_list(i) := mshr.io.wb_req.bits.tag mshr.io.req_pri_val := (i.U === mshr_alloc_idx) && pri_val when (i.U === mshr_alloc_idx) { pri_rdy := mshr.io.req_pri_rdy } mshr.io.req_sec_val := req.valid && sdq_rdy && tag_match(req_idx) && idx_matches(req_idx)(i) && cacheable mshr.io.req := req.bits mshr.io.req_is_probe := req_is_probe mshr.io.req.sdq_id := sdq_alloc_id // Clear because of a FENCE, a request to the same idx as a prefetched line, // a probe to that prefetched line, all mshrs are in use mshr.io.clear_prefetch := ((io.clear_all && !req.valid)|| (req.valid && idx_matches(req_idx)(i) && cacheable && !tag_match(req_idx)) || (req_is_probe && idx_matches(req_idx)(i))) mshr.io.brupdate := io.brupdate mshr.io.exception := io.exception mshr.io.rob_pnr_idx := io.rob_pnr_idx mshr.io.rob_head_idx := io.rob_head_idx mshr.io.prober_state := io.prober_state mshr.io.wb_resp := io.wb_resp meta_write_arb.io.in(i) <> mshr.io.meta_write meta_read_arb.io.in(i) <> mshr.io.meta_read mshr.io.meta_resp := io.meta_resp wb_req_arb.io.in(i) <> mshr.io.wb_req replay_arb.io.in(i) <> mshr.io.replay refill_arb.io.in(i) <> mshr.io.refill lb_read_arb.io.in(i) <> mshr.io.lb_read mshr.io.lb_resp := lb_read_data lb_write_arb.io.in(i) <> mshr.io.lb_write commit_vals(i) := mshr.io.commit_val commit_addrs(i) := mshr.io.commit_addr commit_cohs(i) := mshr.io.commit_coh mshr.io.mem_grant.valid := false.B mshr.io.mem_grant.bits := DontCare when (io.mem_grant.bits.source === i.U) { mshr.io.mem_grant <> io.mem_grant } sec_rdy = sec_rdy || (mshr.io.req_sec_rdy && mshr.io.req_sec_val) resp_arb.io.in(i) <> mshr.io.resp when (!mshr.io.req_pri_rdy) { io.fence_rdy := false.B } for (w <- 0 until memWidth) { when (!mshr.io.probe_rdy && idx_matches(w)(i) && io.req_is_probe(w)) { io.probe_rdy := false.B } } mshr } // Try to round-robin the MSHRs val mshr_head = RegInit(0.U(log2Ceil(cfg.nMSHRs).W)) mshr_alloc_idx := RegNext(AgePriorityEncoder(mshrs.map(m=>m.io.req_pri_rdy), mshr_head)) when (pri_rdy && pri_val) { mshr_head := WrapInc(mshr_head, cfg.nMSHRs) } io.meta_write <> meta_write_arb.io.out io.meta_read <> meta_read_arb.io.out io.wb_req <> wb_req_arb.io.out val mmio_alloc_arb = Module(new Arbiter(Bool(), nIOMSHRs)) var mmio_rdy = false.B val mmios = (0 until nIOMSHRs) map { i => val id = cfg.nMSHRs + 1 + i // +1 for wb unit val 
mshr = Module(new BoomIOMSHR(id)) mmio_alloc_arb.io.in(i).valid := mshr.io.req.ready mmio_alloc_arb.io.in(i).bits := DontCare mshr.io.req.valid := mmio_alloc_arb.io.in(i).ready mshr.io.req.bits := req.bits mmio_rdy = mmio_rdy || mshr.io.req.ready mshr.io.mem_ack.bits := io.mem_grant.bits mshr.io.mem_ack.valid := io.mem_grant.valid && io.mem_grant.bits.source === id.U when (io.mem_grant.bits.source === id.U) { io.mem_grant.ready := true.B } resp_arb.io.in(cfg.nMSHRs + i) <> mshr.io.resp when (!mshr.io.req.ready) { io.fence_rdy := false.B } mshr } mmio_alloc_arb.io.out.ready := req.valid && !cacheable TLArbiter.lowestFromSeq(edge, io.mem_acquire, mshrs.map(_.io.mem_acquire) ++ mmios.map(_.io.mem_access)) TLArbiter.lowestFromSeq(edge, io.mem_finish, mshrs.map(_.io.mem_finish)) val respq = Module(new BranchKillableQueue(new BoomDCacheResp, 4, u => u.uses_ldq, flow = false)) respq.io.brupdate := io.brupdate respq.io.flush := io.exception respq.io.enq <> resp_arb.io.out io.resp <> respq.io.deq for (w <- 0 until memWidth) { io.req(w).ready := (w.U === req_idx) && Mux(!cacheable, mmio_rdy, sdq_rdy && Mux(idx_match(w), tag_match(w) && sec_rdy, pri_rdy)) io.secondary_miss(w) := idx_match(w) && way_match(w) && !tag_match(w) io.block_hit(w) := idx_match(w) && tag_match(w) } io.refill <> refill_arb.io.out val free_sdq = io.replay.fire && isWrite(io.replay.bits.uop.mem_cmd) io.replay <> replay_arb.io.out io.replay.bits.data := sdq(replay_arb.io.out.bits.sdq_id) when (io.replay.valid || sdq_enq) { sdq_val := sdq_val & ~(UIntToOH(replay_arb.io.out.bits.sdq_id) & Fill(cfg.nSDQ, free_sdq)) | PriorityEncoderOH(~sdq_val(cfg.nSDQ-1,0)) & Fill(cfg.nSDQ, sdq_enq) } prefetcher.io.mshr_avail := RegNext(pri_rdy) prefetcher.io.req_val := RegNext(commit_vals.reduce(_||_)) prefetcher.io.req_addr := RegNext(Mux1H(commit_vals, commit_addrs)) prefetcher.io.req_coh := RegNext(Mux1H(commit_vals, commit_cohs)) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? 
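        // (On the B channel, Probe, Get, and Hint are the messages that carry no data payload.)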
val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC 
=> mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
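  // Puts, AMOs, write-intent Hints (PREFETCH_WRITE), and Acquires growing to Trunk
  // (NtoT/BtoT) need T permissions; Gets, read prefetches, and NtoB Acquires do not.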
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
  Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = {
    require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsGet(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Get
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, false.B)

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}")
    val legal = client.supportsPutFull(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutFullData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) =
    Put(fromAddress, toSource, lgSize, data, mask, false.B)

  def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = {
    require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsPutPartial(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.PutPartialData
    b.param := 0.U
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsArithmetic(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.ArithmeticData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = {
    require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsLogical(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.LogicalData
    b.param := atomic
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := data
    b.corrupt := corrupt
    (legal, b)
  }

  def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = {
    require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}")
    val legal = client.supportsHint(toSource, lgSize)
    val b = Wire(new TLBundleB(bundle))
    b.opcode := TLMessages.Hint
    b.param := param
    b.size := lgSize
    b.source := toSource
    b.address := fromAddress
    b.mask := mask(fromAddress, lgSize)
    b.data := DontCare
    b.corrupt := false.B
    (legal, b)
  }

  def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size)
  def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied)
  def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }

  def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data)
  def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B)
  def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.AccessAckData
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := data
    d.corrupt := corrupt
    d
  }

  def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B)
  def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied)
  def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B)
  def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = {
    val d = Wire(new TLBundleD(bundle))
    d.opcode := TLMessages.HintAck
    d.param := 0.U
    d.size := lgSize
    d.source := toSource
    d.sink := 0.U
    d.denied := denied
    d.user := DontCare
    d.echo := DontCare
    d.data := DontCare
    d.corrupt := false.B
    d
  }
}
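These helpers belong to the manager-facing edge (TLEdgeIn): the channel-B constructors return a (legal, bits) pair, while the AccessAck/HintAck overloads build complete channel-D beats so a manager never assembles TLBundleD fields by hand. The sketch below is illustrative only and is not part of the files or the generated design in this document; it assumes a single-beat TL-UL manager whose bundle and edge come from a TLManagerNode elsewhere, and the module name is purely hypothetical.

import chisel3._
import freechips.rocketchip.tilelink._

// Hedged sketch: a one-word manager datapath that answers Gets with
// AccessAckData and Puts with a plain AccessAck, using the helpers above.
// Diplomacy, address decoding and multi-beat bursts are deliberately omitted.
class GetPutResponderSketch(edge: TLEdgeIn) extends Module {
  val io = IO(Flipped(new TLBundle(edge.bundle))) // manager side sees the flipped bundle
  val mem = RegInit(0.U(edge.bundle.dataBits.W))  // single backing data word

  // Channels this sketch never uses.
  io.b := DontCare
  io.b.valid := false.B
  io.c.ready := true.B
  io.e.ready := true.B

  // One D beat per A beat, combinationally.
  io.a.ready := io.d.ready
  io.d.valid := io.a.valid
  io.d.bits := Mux(io.a.bits.opcode === TLMessages.Get,
                   edge.AccessAck(io.a.bits, mem), // AccessAckData carrying the read data
                   edge.AccessAck(io.a.bits))      // plain AccessAck for PutFull/PutPartial

  when (io.a.fire && io.a.bits.opcode =/= TLMessages.Get) {
    mem := io.a.bits.data // byte mask ignored for brevity
  }
}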
File AMOALU.scala:
// See LICENSE.SiFive for license details.
// See LICENSE.Berkeley for license details.

package freechips.rocketchip.rocket

import chisel3._
import chisel3.util._

import org.chipsalliance.cde.config.Parameters

class StoreGen(typ: UInt, addr: UInt, dat: UInt, maxSize: Int) {
  val size = Wire(UInt(log2Up(log2Up(maxSize)+1).W))
  size := typ
  val dat_padded = dat.pad(maxSize*8)
  def misaligned: Bool =
    (addr & ((1.U << size) - 1.U)(log2Up(maxSize)-1,0)).orR

  def mask = {
    var res = 1.U
    for (i <- 0 until log2Up(maxSize)) {
      val upper = Mux(addr(i), res, 0.U) | Mux(size >= (i+1).U, ((BigInt(1) << (1 << i))-1).U, 0.U)
      val lower = Mux(addr(i), 0.U, res)
      res = Cat(upper, lower)
    }
    res
  }

  protected def genData(i: Int): UInt =
    if (i >= log2Up(maxSize)) dat_padded
    else Mux(size === i.U, Fill(1 << (log2Up(maxSize)-i), dat_padded((8 << i)-1,0)), genData(i+1))

  def data = genData(0)
  def wordData = genData(2)
}

class LoadGen(typ: UInt, signed: Bool, addr: UInt, dat: UInt, zero: Bool, maxSize: Int) {
  private val size = new StoreGen(typ, addr, dat, maxSize).size

  private def genData(logMinSize: Int): UInt = {
    var res = dat
    for (i <- log2Up(maxSize)-1 to logMinSize by -1) {
      val pos = 8 << i
      val shifted = Mux(addr(i), res(2*pos-1,pos), res(pos-1,0))
      val doZero = (i == 0).B && zero
      val zeroed = Mux(doZero, 0.U, shifted)
      res = Cat(Mux(size === i.U || doZero, Fill(8*maxSize-pos, signed && zeroed(pos-1)), res(8*maxSize-1,pos)), zeroed)
    }
    res
  }

  def wordData = genData(2)
  def data = genData(0)
}

class AMOALU(operandBits: Int)(implicit p: Parameters) extends Module {
  val minXLen = 32
  val widths = (0 to log2Ceil(operandBits / minXLen)).map(minXLen << _)

  val io = IO(new Bundle {
    val mask = Input(UInt((operandBits / 8).W))
    val cmd = Input(UInt(M_SZ.W))
    val lhs = Input(UInt(operandBits.W))
    val rhs = Input(UInt(operandBits.W))
    val out = Output(UInt(operandBits.W))
    val out_unmasked = Output(UInt(operandBits.W))
  })

  val max = io.cmd === M_XA_MAX || io.cmd === M_XA_MAXU
  val min = io.cmd === M_XA_MIN || io.cmd === M_XA_MINU
  val add = io.cmd === M_XA_ADD
  val logic_and = io.cmd === M_XA_OR || io.cmd === M_XA_AND
  val logic_xor = io.cmd === M_XA_XOR || io.cmd === M_XA_OR

  val adder_out = {
    // partition the carry chain to support sub-xLen addition
    val mask = ~(0.U(operandBits.W) +: widths.init.map(w => !io.mask(w/8-1) << (w-1))).reduce(_|_)
    (io.lhs & mask) + (io.rhs & mask)
  }

  val less = {
    // break up the comparator so the lower parts will be CSE'd
    def isLessUnsigned(x: UInt, y: UInt, n: Int): Bool = {
      if (n == minXLen) x(n-1, 0) < y(n-1, 0)
      else x(n-1, n/2) < y(n-1, n/2) || x(n-1, n/2) === y(n-1, n/2) && isLessUnsigned(x, y, n/2)
    }

    def isLess(x: UInt, y: UInt, n: Int): Bool = {
      val signed = {
        val mask = M_XA_MIN ^ M_XA_MINU
        (io.cmd & mask) === (M_XA_MIN & mask)
      }
      Mux(x(n-1) === y(n-1), isLessUnsigned(x, y, n), Mux(signed, x(n-1), y(n-1)))
    }

    PriorityMux(widths.reverse.map(w => (io.mask(w/8/2), isLess(io.lhs, io.rhs, w))))
  }

  val minmax = Mux(Mux(less, min, max), io.lhs, io.rhs)
  val logic =
    Mux(logic_and, io.lhs & io.rhs, 0.U) |
    Mux(logic_xor, io.lhs ^ io.rhs, 0.U)
  val out = Mux(add, adder_out, Mux(logic_and || logic_xor, logic, minmax))

  val wmask = FillInterleaved(8, io.mask)
  io.out := wmask & out | ~wmask & io.lhs
  io.out_unmasked := out
}
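The AMOALU selects between adder_out (adds), logic (AND/OR/XOR) and minmax (the min/max family), then io.out merges the result into io.lhs byte-by-byte through wmask, so only the bytes named in io.mask change. The wrapper below is a minimal, hypothetical sketch of that wiring for a 64-bit beat whose 4-byte add operand sits in the upper word; the constant cmd/mask values are illustrative and would normally come from the memory request.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.rocket._ // AMOALU, M_XA_ADD (assumed in scope via this import)

// Hedged sketch, not part of the generated design in this document.
class AmoAddWordSketch(implicit p: Parameters) extends Module {
  val io = IO(new Bundle {
    val old_data = Input(UInt(64.W))   // current memory contents ("lhs")
    val amo_data = Input(UInt(64.W))   // operand from the AMO instruction ("rhs")
    val new_data = Output(UInt(64.W))  // merged value to write back
  })

  val amoalu = Module(new AMOALU(64))
  amoalu.io.cmd  := M_XA_ADD           // min/max/logic commands are driven the same way
  amoalu.io.mask := "b1111_0000".U     // only bytes 7..4 (the upper 32-bit word) participate
  amoalu.io.lhs  := io.old_data
  amoalu.io.rhs  := io.amo_data
  io.new_data := amoalu.io.out         // bytes outside the mask keep io.old_data's value
}

The out_unmasked port is left unread here; it exposes the raw operation result when the caller wants to do its own byte merging.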
module BoomMSHR_1( // @[mshrs.scala:36:7] input clock, // @[mshrs.scala:36:7] input reset, // @[mshrs.scala:36:7] input io_req_pri_val, // @[mshrs.scala:39:14] output io_req_pri_rdy, // @[mshrs.scala:39:14] input io_req_sec_val, // @[mshrs.scala:39:14] output io_req_sec_rdy, // @[mshrs.scala:39:14] input io_clear_prefetch, // @[mshrs.scala:39:14] input [5:0] io_rob_pnr_idx, // @[mshrs.scala:39:14] input [5:0] io_rob_head_idx, // @[mshrs.scala:39:14] input [6:0] io_req_uop_uopc, // @[mshrs.scala:39:14] input [31:0] io_req_uop_inst, // @[mshrs.scala:39:14] input [31:0] io_req_uop_debug_inst, // @[mshrs.scala:39:14] input io_req_uop_is_rvc, // @[mshrs.scala:39:14] input [33:0] io_req_uop_debug_pc, // @[mshrs.scala:39:14] input [2:0] io_req_uop_iq_type, // @[mshrs.scala:39:14] input [9:0] io_req_uop_fu_code, // @[mshrs.scala:39:14] input [3:0] io_req_uop_ctrl_br_type, // @[mshrs.scala:39:14] input [1:0] io_req_uop_ctrl_op1_sel, // @[mshrs.scala:39:14] input [2:0] io_req_uop_ctrl_op2_sel, // @[mshrs.scala:39:14] input [2:0] io_req_uop_ctrl_imm_sel, // @[mshrs.scala:39:14] input [4:0] io_req_uop_ctrl_op_fcn, // @[mshrs.scala:39:14] input io_req_uop_ctrl_fcn_dw, // @[mshrs.scala:39:14] input [2:0] io_req_uop_ctrl_csr_cmd, // @[mshrs.scala:39:14] input io_req_uop_ctrl_is_load, // @[mshrs.scala:39:14] input io_req_uop_ctrl_is_sta, // @[mshrs.scala:39:14] input io_req_uop_ctrl_is_std, // @[mshrs.scala:39:14] input [1:0] io_req_uop_iw_state, // @[mshrs.scala:39:14] input io_req_uop_iw_p1_poisoned, // @[mshrs.scala:39:14] input io_req_uop_iw_p2_poisoned, // @[mshrs.scala:39:14] input io_req_uop_is_br, // @[mshrs.scala:39:14] input io_req_uop_is_jalr, // @[mshrs.scala:39:14] input io_req_uop_is_jal, // @[mshrs.scala:39:14] input io_req_uop_is_sfb, // @[mshrs.scala:39:14] input [3:0] io_req_uop_br_mask, // @[mshrs.scala:39:14] input [1:0] io_req_uop_br_tag, // @[mshrs.scala:39:14] input [3:0] io_req_uop_ftq_idx, // @[mshrs.scala:39:14] input io_req_uop_edge_inst, // @[mshrs.scala:39:14] input [5:0] io_req_uop_pc_lob, // @[mshrs.scala:39:14] input io_req_uop_taken, // @[mshrs.scala:39:14] input [19:0] io_req_uop_imm_packed, // @[mshrs.scala:39:14] input [11:0] io_req_uop_csr_addr, // @[mshrs.scala:39:14] input [5:0] io_req_uop_rob_idx, // @[mshrs.scala:39:14] input [3:0] io_req_uop_ldq_idx, // @[mshrs.scala:39:14] input [3:0] io_req_uop_stq_idx, // @[mshrs.scala:39:14] input [1:0] io_req_uop_rxq_idx, // @[mshrs.scala:39:14] input [6:0] io_req_uop_pdst, // @[mshrs.scala:39:14] input [6:0] io_req_uop_prs1, // @[mshrs.scala:39:14] input [6:0] io_req_uop_prs2, // @[mshrs.scala:39:14] input [6:0] io_req_uop_prs3, // @[mshrs.scala:39:14] input [3:0] io_req_uop_ppred, // @[mshrs.scala:39:14] input io_req_uop_prs1_busy, // @[mshrs.scala:39:14] input io_req_uop_prs2_busy, // @[mshrs.scala:39:14] input io_req_uop_prs3_busy, // @[mshrs.scala:39:14] input io_req_uop_ppred_busy, // @[mshrs.scala:39:14] input [6:0] io_req_uop_stale_pdst, // @[mshrs.scala:39:14] input io_req_uop_exception, // @[mshrs.scala:39:14] input [63:0] io_req_uop_exc_cause, // @[mshrs.scala:39:14] input io_req_uop_bypassable, // @[mshrs.scala:39:14] input [4:0] io_req_uop_mem_cmd, // @[mshrs.scala:39:14] input [1:0] io_req_uop_mem_size, // @[mshrs.scala:39:14] input io_req_uop_mem_signed, // @[mshrs.scala:39:14] input io_req_uop_is_fence, // @[mshrs.scala:39:14] input io_req_uop_is_fencei, // @[mshrs.scala:39:14] input io_req_uop_is_amo, // @[mshrs.scala:39:14] input io_req_uop_uses_ldq, // @[mshrs.scala:39:14] input io_req_uop_uses_stq, // 
@[mshrs.scala:39:14] input io_req_uop_is_sys_pc2epc, // @[mshrs.scala:39:14] input io_req_uop_is_unique, // @[mshrs.scala:39:14] input io_req_uop_flush_on_commit, // @[mshrs.scala:39:14] input io_req_uop_ldst_is_rs1, // @[mshrs.scala:39:14] input [5:0] io_req_uop_ldst, // @[mshrs.scala:39:14] input [5:0] io_req_uop_lrs1, // @[mshrs.scala:39:14] input [5:0] io_req_uop_lrs2, // @[mshrs.scala:39:14] input [5:0] io_req_uop_lrs3, // @[mshrs.scala:39:14] input io_req_uop_ldst_val, // @[mshrs.scala:39:14] input [1:0] io_req_uop_dst_rtype, // @[mshrs.scala:39:14] input [1:0] io_req_uop_lrs1_rtype, // @[mshrs.scala:39:14] input [1:0] io_req_uop_lrs2_rtype, // @[mshrs.scala:39:14] input io_req_uop_frs3_en, // @[mshrs.scala:39:14] input io_req_uop_fp_val, // @[mshrs.scala:39:14] input io_req_uop_fp_single, // @[mshrs.scala:39:14] input io_req_uop_xcpt_pf_if, // @[mshrs.scala:39:14] input io_req_uop_xcpt_ae_if, // @[mshrs.scala:39:14] input io_req_uop_xcpt_ma_if, // @[mshrs.scala:39:14] input io_req_uop_bp_debug_if, // @[mshrs.scala:39:14] input io_req_uop_bp_xcpt_if, // @[mshrs.scala:39:14] input [1:0] io_req_uop_debug_fsrc, // @[mshrs.scala:39:14] input [1:0] io_req_uop_debug_tsrc, // @[mshrs.scala:39:14] input [33:0] io_req_addr, // @[mshrs.scala:39:14] input [63:0] io_req_data, // @[mshrs.scala:39:14] input io_req_is_hella, // @[mshrs.scala:39:14] input io_req_tag_match, // @[mshrs.scala:39:14] input [1:0] io_req_old_meta_coh_state, // @[mshrs.scala:39:14] input [21:0] io_req_old_meta_tag, // @[mshrs.scala:39:14] input [1:0] io_req_way_en, // @[mshrs.scala:39:14] input [4:0] io_req_sdq_id, // @[mshrs.scala:39:14] input io_req_is_probe, // @[mshrs.scala:39:14] output io_idx_valid, // @[mshrs.scala:39:14] output [3:0] io_idx_bits, // @[mshrs.scala:39:14] output io_way_valid, // @[mshrs.scala:39:14] output [1:0] io_way_bits, // @[mshrs.scala:39:14] output io_tag_valid, // @[mshrs.scala:39:14] output [23:0] io_tag_bits, // @[mshrs.scala:39:14] input io_mem_acquire_ready, // @[mshrs.scala:39:14] output io_mem_acquire_valid, // @[mshrs.scala:39:14] output [2:0] io_mem_acquire_bits_param, // @[mshrs.scala:39:14] output [31:0] io_mem_acquire_bits_address, // @[mshrs.scala:39:14] output io_mem_grant_ready, // @[mshrs.scala:39:14] input io_mem_grant_valid, // @[mshrs.scala:39:14] input [2:0] io_mem_grant_bits_opcode, // @[mshrs.scala:39:14] input [1:0] io_mem_grant_bits_param, // @[mshrs.scala:39:14] input [3:0] io_mem_grant_bits_size, // @[mshrs.scala:39:14] input [3:0] io_mem_grant_bits_source, // @[mshrs.scala:39:14] input [2:0] io_mem_grant_bits_sink, // @[mshrs.scala:39:14] input io_mem_grant_bits_denied, // @[mshrs.scala:39:14] input [63:0] io_mem_grant_bits_data, // @[mshrs.scala:39:14] input io_mem_grant_bits_corrupt, // @[mshrs.scala:39:14] input io_mem_finish_ready, // @[mshrs.scala:39:14] output io_mem_finish_valid, // @[mshrs.scala:39:14] output [2:0] io_mem_finish_bits_sink, // @[mshrs.scala:39:14] input io_prober_state_valid, // @[mshrs.scala:39:14] input [33:0] io_prober_state_bits, // @[mshrs.scala:39:14] input io_refill_ready, // @[mshrs.scala:39:14] output io_refill_valid, // @[mshrs.scala:39:14] output [1:0] io_refill_bits_way_en, // @[mshrs.scala:39:14] output [9:0] io_refill_bits_addr, // @[mshrs.scala:39:14] output [63:0] io_refill_bits_data, // @[mshrs.scala:39:14] input io_meta_write_ready, // @[mshrs.scala:39:14] output io_meta_write_valid, // @[mshrs.scala:39:14] output [3:0] io_meta_write_bits_idx, // @[mshrs.scala:39:14] output [1:0] io_meta_write_bits_way_en, // 
@[mshrs.scala:39:14] output [1:0] io_meta_write_bits_data_coh_state, // @[mshrs.scala:39:14] output [21:0] io_meta_write_bits_data_tag, // @[mshrs.scala:39:14] input io_meta_read_ready, // @[mshrs.scala:39:14] output io_meta_read_valid, // @[mshrs.scala:39:14] output [3:0] io_meta_read_bits_idx, // @[mshrs.scala:39:14] output [1:0] io_meta_read_bits_way_en, // @[mshrs.scala:39:14] output [21:0] io_meta_read_bits_tag, // @[mshrs.scala:39:14] input io_meta_resp_valid, // @[mshrs.scala:39:14] input [1:0] io_meta_resp_bits_coh_state, // @[mshrs.scala:39:14] input [21:0] io_meta_resp_bits_tag, // @[mshrs.scala:39:14] input io_wb_req_ready, // @[mshrs.scala:39:14] output io_wb_req_valid, // @[mshrs.scala:39:14] output [21:0] io_wb_req_bits_tag, // @[mshrs.scala:39:14] output [3:0] io_wb_req_bits_idx, // @[mshrs.scala:39:14] output [2:0] io_wb_req_bits_param, // @[mshrs.scala:39:14] output [1:0] io_wb_req_bits_way_en, // @[mshrs.scala:39:14] output io_commit_val, // @[mshrs.scala:39:14] output [33:0] io_commit_addr, // @[mshrs.scala:39:14] output [1:0] io_commit_coh_state, // @[mshrs.scala:39:14] input io_lb_read_ready, // @[mshrs.scala:39:14] output io_lb_read_valid, // @[mshrs.scala:39:14] output [2:0] io_lb_read_bits_offset, // @[mshrs.scala:39:14] input [63:0] io_lb_resp, // @[mshrs.scala:39:14] input io_lb_write_ready, // @[mshrs.scala:39:14] output io_lb_write_valid, // @[mshrs.scala:39:14] output [2:0] io_lb_write_bits_offset, // @[mshrs.scala:39:14] output [63:0] io_lb_write_bits_data, // @[mshrs.scala:39:14] input io_replay_ready, // @[mshrs.scala:39:14] output io_replay_valid, // @[mshrs.scala:39:14] output [6:0] io_replay_bits_uop_uopc, // @[mshrs.scala:39:14] output [31:0] io_replay_bits_uop_inst, // @[mshrs.scala:39:14] output [31:0] io_replay_bits_uop_debug_inst, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_rvc, // @[mshrs.scala:39:14] output [33:0] io_replay_bits_uop_debug_pc, // @[mshrs.scala:39:14] output [2:0] io_replay_bits_uop_iq_type, // @[mshrs.scala:39:14] output [9:0] io_replay_bits_uop_fu_code, // @[mshrs.scala:39:14] output [3:0] io_replay_bits_uop_ctrl_br_type, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_ctrl_op1_sel, // @[mshrs.scala:39:14] output [2:0] io_replay_bits_uop_ctrl_op2_sel, // @[mshrs.scala:39:14] output [2:0] io_replay_bits_uop_ctrl_imm_sel, // @[mshrs.scala:39:14] output [4:0] io_replay_bits_uop_ctrl_op_fcn, // @[mshrs.scala:39:14] output io_replay_bits_uop_ctrl_fcn_dw, // @[mshrs.scala:39:14] output [2:0] io_replay_bits_uop_ctrl_csr_cmd, // @[mshrs.scala:39:14] output io_replay_bits_uop_ctrl_is_load, // @[mshrs.scala:39:14] output io_replay_bits_uop_ctrl_is_sta, // @[mshrs.scala:39:14] output io_replay_bits_uop_ctrl_is_std, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_iw_state, // @[mshrs.scala:39:14] output io_replay_bits_uop_iw_p1_poisoned, // @[mshrs.scala:39:14] output io_replay_bits_uop_iw_p2_poisoned, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_br, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_jalr, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_jal, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_sfb, // @[mshrs.scala:39:14] output [3:0] io_replay_bits_uop_br_mask, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_br_tag, // @[mshrs.scala:39:14] output [3:0] io_replay_bits_uop_ftq_idx, // @[mshrs.scala:39:14] output io_replay_bits_uop_edge_inst, // @[mshrs.scala:39:14] output [5:0] io_replay_bits_uop_pc_lob, // @[mshrs.scala:39:14] output io_replay_bits_uop_taken, // 
@[mshrs.scala:39:14] output [19:0] io_replay_bits_uop_imm_packed, // @[mshrs.scala:39:14] output [11:0] io_replay_bits_uop_csr_addr, // @[mshrs.scala:39:14] output [5:0] io_replay_bits_uop_rob_idx, // @[mshrs.scala:39:14] output [3:0] io_replay_bits_uop_ldq_idx, // @[mshrs.scala:39:14] output [3:0] io_replay_bits_uop_stq_idx, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_rxq_idx, // @[mshrs.scala:39:14] output [6:0] io_replay_bits_uop_pdst, // @[mshrs.scala:39:14] output [6:0] io_replay_bits_uop_prs1, // @[mshrs.scala:39:14] output [6:0] io_replay_bits_uop_prs2, // @[mshrs.scala:39:14] output [6:0] io_replay_bits_uop_prs3, // @[mshrs.scala:39:14] output [3:0] io_replay_bits_uop_ppred, // @[mshrs.scala:39:14] output io_replay_bits_uop_prs1_busy, // @[mshrs.scala:39:14] output io_replay_bits_uop_prs2_busy, // @[mshrs.scala:39:14] output io_replay_bits_uop_prs3_busy, // @[mshrs.scala:39:14] output io_replay_bits_uop_ppred_busy, // @[mshrs.scala:39:14] output [6:0] io_replay_bits_uop_stale_pdst, // @[mshrs.scala:39:14] output io_replay_bits_uop_exception, // @[mshrs.scala:39:14] output [63:0] io_replay_bits_uop_exc_cause, // @[mshrs.scala:39:14] output io_replay_bits_uop_bypassable, // @[mshrs.scala:39:14] output [4:0] io_replay_bits_uop_mem_cmd, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_mem_size, // @[mshrs.scala:39:14] output io_replay_bits_uop_mem_signed, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_fence, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_fencei, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_amo, // @[mshrs.scala:39:14] output io_replay_bits_uop_uses_ldq, // @[mshrs.scala:39:14] output io_replay_bits_uop_uses_stq, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_sys_pc2epc, // @[mshrs.scala:39:14] output io_replay_bits_uop_is_unique, // @[mshrs.scala:39:14] output io_replay_bits_uop_flush_on_commit, // @[mshrs.scala:39:14] output io_replay_bits_uop_ldst_is_rs1, // @[mshrs.scala:39:14] output [5:0] io_replay_bits_uop_ldst, // @[mshrs.scala:39:14] output [5:0] io_replay_bits_uop_lrs1, // @[mshrs.scala:39:14] output [5:0] io_replay_bits_uop_lrs2, // @[mshrs.scala:39:14] output [5:0] io_replay_bits_uop_lrs3, // @[mshrs.scala:39:14] output io_replay_bits_uop_ldst_val, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_dst_rtype, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_lrs1_rtype, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_lrs2_rtype, // @[mshrs.scala:39:14] output io_replay_bits_uop_frs3_en, // @[mshrs.scala:39:14] output io_replay_bits_uop_fp_val, // @[mshrs.scala:39:14] output io_replay_bits_uop_fp_single, // @[mshrs.scala:39:14] output io_replay_bits_uop_xcpt_pf_if, // @[mshrs.scala:39:14] output io_replay_bits_uop_xcpt_ae_if, // @[mshrs.scala:39:14] output io_replay_bits_uop_xcpt_ma_if, // @[mshrs.scala:39:14] output io_replay_bits_uop_bp_debug_if, // @[mshrs.scala:39:14] output io_replay_bits_uop_bp_xcpt_if, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_debug_fsrc, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_uop_debug_tsrc, // @[mshrs.scala:39:14] output [33:0] io_replay_bits_addr, // @[mshrs.scala:39:14] output [63:0] io_replay_bits_data, // @[mshrs.scala:39:14] output io_replay_bits_is_hella, // @[mshrs.scala:39:14] output io_replay_bits_tag_match, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_old_meta_coh_state, // @[mshrs.scala:39:14] output [21:0] io_replay_bits_old_meta_tag, // @[mshrs.scala:39:14] output [1:0] io_replay_bits_way_en, // @[mshrs.scala:39:14] output 
[4:0] io_replay_bits_sdq_id, // @[mshrs.scala:39:14] input io_resp_ready, // @[mshrs.scala:39:14] output io_resp_valid, // @[mshrs.scala:39:14] output [6:0] io_resp_bits_uop_uopc, // @[mshrs.scala:39:14] output [31:0] io_resp_bits_uop_inst, // @[mshrs.scala:39:14] output [31:0] io_resp_bits_uop_debug_inst, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_rvc, // @[mshrs.scala:39:14] output [33:0] io_resp_bits_uop_debug_pc, // @[mshrs.scala:39:14] output [2:0] io_resp_bits_uop_iq_type, // @[mshrs.scala:39:14] output [9:0] io_resp_bits_uop_fu_code, // @[mshrs.scala:39:14] output [3:0] io_resp_bits_uop_ctrl_br_type, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_ctrl_op1_sel, // @[mshrs.scala:39:14] output [2:0] io_resp_bits_uop_ctrl_op2_sel, // @[mshrs.scala:39:14] output [2:0] io_resp_bits_uop_ctrl_imm_sel, // @[mshrs.scala:39:14] output [4:0] io_resp_bits_uop_ctrl_op_fcn, // @[mshrs.scala:39:14] output io_resp_bits_uop_ctrl_fcn_dw, // @[mshrs.scala:39:14] output [2:0] io_resp_bits_uop_ctrl_csr_cmd, // @[mshrs.scala:39:14] output io_resp_bits_uop_ctrl_is_load, // @[mshrs.scala:39:14] output io_resp_bits_uop_ctrl_is_sta, // @[mshrs.scala:39:14] output io_resp_bits_uop_ctrl_is_std, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_iw_state, // @[mshrs.scala:39:14] output io_resp_bits_uop_iw_p1_poisoned, // @[mshrs.scala:39:14] output io_resp_bits_uop_iw_p2_poisoned, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_br, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_jalr, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_jal, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_sfb, // @[mshrs.scala:39:14] output [3:0] io_resp_bits_uop_br_mask, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_br_tag, // @[mshrs.scala:39:14] output [3:0] io_resp_bits_uop_ftq_idx, // @[mshrs.scala:39:14] output io_resp_bits_uop_edge_inst, // @[mshrs.scala:39:14] output [5:0] io_resp_bits_uop_pc_lob, // @[mshrs.scala:39:14] output io_resp_bits_uop_taken, // @[mshrs.scala:39:14] output [19:0] io_resp_bits_uop_imm_packed, // @[mshrs.scala:39:14] output [11:0] io_resp_bits_uop_csr_addr, // @[mshrs.scala:39:14] output [5:0] io_resp_bits_uop_rob_idx, // @[mshrs.scala:39:14] output [3:0] io_resp_bits_uop_ldq_idx, // @[mshrs.scala:39:14] output [3:0] io_resp_bits_uop_stq_idx, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_rxq_idx, // @[mshrs.scala:39:14] output [6:0] io_resp_bits_uop_pdst, // @[mshrs.scala:39:14] output [6:0] io_resp_bits_uop_prs1, // @[mshrs.scala:39:14] output [6:0] io_resp_bits_uop_prs2, // @[mshrs.scala:39:14] output [6:0] io_resp_bits_uop_prs3, // @[mshrs.scala:39:14] output [3:0] io_resp_bits_uop_ppred, // @[mshrs.scala:39:14] output io_resp_bits_uop_prs1_busy, // @[mshrs.scala:39:14] output io_resp_bits_uop_prs2_busy, // @[mshrs.scala:39:14] output io_resp_bits_uop_prs3_busy, // @[mshrs.scala:39:14] output io_resp_bits_uop_ppred_busy, // @[mshrs.scala:39:14] output [6:0] io_resp_bits_uop_stale_pdst, // @[mshrs.scala:39:14] output io_resp_bits_uop_exception, // @[mshrs.scala:39:14] output [63:0] io_resp_bits_uop_exc_cause, // @[mshrs.scala:39:14] output io_resp_bits_uop_bypassable, // @[mshrs.scala:39:14] output [4:0] io_resp_bits_uop_mem_cmd, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_mem_size, // @[mshrs.scala:39:14] output io_resp_bits_uop_mem_signed, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_fence, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_fencei, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_amo, // @[mshrs.scala:39:14] output 
io_resp_bits_uop_uses_ldq, // @[mshrs.scala:39:14] output io_resp_bits_uop_uses_stq, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_sys_pc2epc, // @[mshrs.scala:39:14] output io_resp_bits_uop_is_unique, // @[mshrs.scala:39:14] output io_resp_bits_uop_flush_on_commit, // @[mshrs.scala:39:14] output io_resp_bits_uop_ldst_is_rs1, // @[mshrs.scala:39:14] output [5:0] io_resp_bits_uop_ldst, // @[mshrs.scala:39:14] output [5:0] io_resp_bits_uop_lrs1, // @[mshrs.scala:39:14] output [5:0] io_resp_bits_uop_lrs2, // @[mshrs.scala:39:14] output [5:0] io_resp_bits_uop_lrs3, // @[mshrs.scala:39:14] output io_resp_bits_uop_ldst_val, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_dst_rtype, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_lrs1_rtype, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_lrs2_rtype, // @[mshrs.scala:39:14] output io_resp_bits_uop_frs3_en, // @[mshrs.scala:39:14] output io_resp_bits_uop_fp_val, // @[mshrs.scala:39:14] output io_resp_bits_uop_fp_single, // @[mshrs.scala:39:14] output io_resp_bits_uop_xcpt_pf_if, // @[mshrs.scala:39:14] output io_resp_bits_uop_xcpt_ae_if, // @[mshrs.scala:39:14] output io_resp_bits_uop_xcpt_ma_if, // @[mshrs.scala:39:14] output io_resp_bits_uop_bp_debug_if, // @[mshrs.scala:39:14] output io_resp_bits_uop_bp_xcpt_if, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_debug_fsrc, // @[mshrs.scala:39:14] output [1:0] io_resp_bits_uop_debug_tsrc, // @[mshrs.scala:39:14] output [63:0] io_resp_bits_data, // @[mshrs.scala:39:14] output io_resp_bits_is_hella, // @[mshrs.scala:39:14] input io_wb_resp, // @[mshrs.scala:39:14] output io_probe_rdy // @[mshrs.scala:39:14] ); wire rpq_io_deq_ready; // @[mshrs.scala:135:20, :215:30, :222:40, :233:41, :256:45] wire _rpq_io_enq_ready; // @[mshrs.scala:128:19] wire _rpq_io_deq_valid; // @[mshrs.scala:128:19] wire [6:0] _rpq_io_deq_bits_uop_uopc; // @[mshrs.scala:128:19] wire [31:0] _rpq_io_deq_bits_uop_inst; // @[mshrs.scala:128:19] wire [31:0] _rpq_io_deq_bits_uop_debug_inst; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_rvc; // @[mshrs.scala:128:19] wire [33:0] _rpq_io_deq_bits_uop_debug_pc; // @[mshrs.scala:128:19] wire [2:0] _rpq_io_deq_bits_uop_iq_type; // @[mshrs.scala:128:19] wire [9:0] _rpq_io_deq_bits_uop_fu_code; // @[mshrs.scala:128:19] wire [3:0] _rpq_io_deq_bits_uop_ctrl_br_type; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_ctrl_op1_sel; // @[mshrs.scala:128:19] wire [2:0] _rpq_io_deq_bits_uop_ctrl_op2_sel; // @[mshrs.scala:128:19] wire [2:0] _rpq_io_deq_bits_uop_ctrl_imm_sel; // @[mshrs.scala:128:19] wire [4:0] _rpq_io_deq_bits_uop_ctrl_op_fcn; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_ctrl_fcn_dw; // @[mshrs.scala:128:19] wire [2:0] _rpq_io_deq_bits_uop_ctrl_csr_cmd; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_ctrl_is_load; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_ctrl_is_sta; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_ctrl_is_std; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_iw_state; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_iw_p1_poisoned; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_iw_p2_poisoned; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_br; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_jalr; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_jal; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_sfb; // @[mshrs.scala:128:19] wire [3:0] _rpq_io_deq_bits_uop_br_mask; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_br_tag; // @[mshrs.scala:128:19] wire 
[3:0] _rpq_io_deq_bits_uop_ftq_idx; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_edge_inst; // @[mshrs.scala:128:19] wire [5:0] _rpq_io_deq_bits_uop_pc_lob; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_taken; // @[mshrs.scala:128:19] wire [19:0] _rpq_io_deq_bits_uop_imm_packed; // @[mshrs.scala:128:19] wire [11:0] _rpq_io_deq_bits_uop_csr_addr; // @[mshrs.scala:128:19] wire [5:0] _rpq_io_deq_bits_uop_rob_idx; // @[mshrs.scala:128:19] wire [3:0] _rpq_io_deq_bits_uop_ldq_idx; // @[mshrs.scala:128:19] wire [3:0] _rpq_io_deq_bits_uop_stq_idx; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_rxq_idx; // @[mshrs.scala:128:19] wire [6:0] _rpq_io_deq_bits_uop_pdst; // @[mshrs.scala:128:19] wire [6:0] _rpq_io_deq_bits_uop_prs1; // @[mshrs.scala:128:19] wire [6:0] _rpq_io_deq_bits_uop_prs2; // @[mshrs.scala:128:19] wire [6:0] _rpq_io_deq_bits_uop_prs3; // @[mshrs.scala:128:19] wire [3:0] _rpq_io_deq_bits_uop_ppred; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_prs1_busy; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_prs2_busy; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_prs3_busy; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_ppred_busy; // @[mshrs.scala:128:19] wire [6:0] _rpq_io_deq_bits_uop_stale_pdst; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_exception; // @[mshrs.scala:128:19] wire [63:0] _rpq_io_deq_bits_uop_exc_cause; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_bypassable; // @[mshrs.scala:128:19] wire [4:0] _rpq_io_deq_bits_uop_mem_cmd; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_mem_size; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_mem_signed; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_fence; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_fencei; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_amo; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_uses_ldq; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_uses_stq; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_sys_pc2epc; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_is_unique; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_flush_on_commit; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_ldst_is_rs1; // @[mshrs.scala:128:19] wire [5:0] _rpq_io_deq_bits_uop_ldst; // @[mshrs.scala:128:19] wire [5:0] _rpq_io_deq_bits_uop_lrs1; // @[mshrs.scala:128:19] wire [5:0] _rpq_io_deq_bits_uop_lrs2; // @[mshrs.scala:128:19] wire [5:0] _rpq_io_deq_bits_uop_lrs3; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_ldst_val; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_dst_rtype; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_lrs1_rtype; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_lrs2_rtype; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_frs3_en; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_fp_val; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_fp_single; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_xcpt_pf_if; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_xcpt_ae_if; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_xcpt_ma_if; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_bp_debug_if; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_uop_bp_xcpt_if; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_debug_fsrc; // @[mshrs.scala:128:19] wire [1:0] _rpq_io_deq_bits_uop_debug_tsrc; // @[mshrs.scala:128:19] wire [33:0] _rpq_io_deq_bits_addr; // @[mshrs.scala:128:19] wire _rpq_io_deq_bits_is_hella; // @[mshrs.scala:128:19] wire _rpq_io_empty; // @[mshrs.scala:128:19] 
wire io_req_pri_val_0 = io_req_pri_val; // @[mshrs.scala:36:7] wire io_req_sec_val_0 = io_req_sec_val; // @[mshrs.scala:36:7] wire io_clear_prefetch_0 = io_clear_prefetch; // @[mshrs.scala:36:7] wire [5:0] io_rob_pnr_idx_0 = io_rob_pnr_idx; // @[mshrs.scala:36:7] wire [5:0] io_rob_head_idx_0 = io_rob_head_idx; // @[mshrs.scala:36:7] wire [6:0] io_req_uop_uopc_0 = io_req_uop_uopc; // @[mshrs.scala:36:7] wire [31:0] io_req_uop_inst_0 = io_req_uop_inst; // @[mshrs.scala:36:7] wire [31:0] io_req_uop_debug_inst_0 = io_req_uop_debug_inst; // @[mshrs.scala:36:7] wire io_req_uop_is_rvc_0 = io_req_uop_is_rvc; // @[mshrs.scala:36:7] wire [33:0] io_req_uop_debug_pc_0 = io_req_uop_debug_pc; // @[mshrs.scala:36:7] wire [2:0] io_req_uop_iq_type_0 = io_req_uop_iq_type; // @[mshrs.scala:36:7] wire [9:0] io_req_uop_fu_code_0 = io_req_uop_fu_code; // @[mshrs.scala:36:7] wire [3:0] io_req_uop_ctrl_br_type_0 = io_req_uop_ctrl_br_type; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_ctrl_op1_sel_0 = io_req_uop_ctrl_op1_sel; // @[mshrs.scala:36:7] wire [2:0] io_req_uop_ctrl_op2_sel_0 = io_req_uop_ctrl_op2_sel; // @[mshrs.scala:36:7] wire [2:0] io_req_uop_ctrl_imm_sel_0 = io_req_uop_ctrl_imm_sel; // @[mshrs.scala:36:7] wire [4:0] io_req_uop_ctrl_op_fcn_0 = io_req_uop_ctrl_op_fcn; // @[mshrs.scala:36:7] wire io_req_uop_ctrl_fcn_dw_0 = io_req_uop_ctrl_fcn_dw; // @[mshrs.scala:36:7] wire [2:0] io_req_uop_ctrl_csr_cmd_0 = io_req_uop_ctrl_csr_cmd; // @[mshrs.scala:36:7] wire io_req_uop_ctrl_is_load_0 = io_req_uop_ctrl_is_load; // @[mshrs.scala:36:7] wire io_req_uop_ctrl_is_sta_0 = io_req_uop_ctrl_is_sta; // @[mshrs.scala:36:7] wire io_req_uop_ctrl_is_std_0 = io_req_uop_ctrl_is_std; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_iw_state_0 = io_req_uop_iw_state; // @[mshrs.scala:36:7] wire io_req_uop_iw_p1_poisoned_0 = io_req_uop_iw_p1_poisoned; // @[mshrs.scala:36:7] wire io_req_uop_iw_p2_poisoned_0 = io_req_uop_iw_p2_poisoned; // @[mshrs.scala:36:7] wire io_req_uop_is_br_0 = io_req_uop_is_br; // @[mshrs.scala:36:7] wire io_req_uop_is_jalr_0 = io_req_uop_is_jalr; // @[mshrs.scala:36:7] wire io_req_uop_is_jal_0 = io_req_uop_is_jal; // @[mshrs.scala:36:7] wire io_req_uop_is_sfb_0 = io_req_uop_is_sfb; // @[mshrs.scala:36:7] wire [3:0] io_req_uop_br_mask_0 = io_req_uop_br_mask; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_br_tag_0 = io_req_uop_br_tag; // @[mshrs.scala:36:7] wire [3:0] io_req_uop_ftq_idx_0 = io_req_uop_ftq_idx; // @[mshrs.scala:36:7] wire io_req_uop_edge_inst_0 = io_req_uop_edge_inst; // @[mshrs.scala:36:7] wire [5:0] io_req_uop_pc_lob_0 = io_req_uop_pc_lob; // @[mshrs.scala:36:7] wire io_req_uop_taken_0 = io_req_uop_taken; // @[mshrs.scala:36:7] wire [19:0] io_req_uop_imm_packed_0 = io_req_uop_imm_packed; // @[mshrs.scala:36:7] wire [11:0] io_req_uop_csr_addr_0 = io_req_uop_csr_addr; // @[mshrs.scala:36:7] wire [5:0] io_req_uop_rob_idx_0 = io_req_uop_rob_idx; // @[mshrs.scala:36:7] wire [3:0] io_req_uop_ldq_idx_0 = io_req_uop_ldq_idx; // @[mshrs.scala:36:7] wire [3:0] io_req_uop_stq_idx_0 = io_req_uop_stq_idx; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_rxq_idx_0 = io_req_uop_rxq_idx; // @[mshrs.scala:36:7] wire [6:0] io_req_uop_pdst_0 = io_req_uop_pdst; // @[mshrs.scala:36:7] wire [6:0] io_req_uop_prs1_0 = io_req_uop_prs1; // @[mshrs.scala:36:7] wire [6:0] io_req_uop_prs2_0 = io_req_uop_prs2; // @[mshrs.scala:36:7] wire [6:0] io_req_uop_prs3_0 = io_req_uop_prs3; // @[mshrs.scala:36:7] wire [3:0] io_req_uop_ppred_0 = io_req_uop_ppred; // @[mshrs.scala:36:7] wire io_req_uop_prs1_busy_0 = io_req_uop_prs1_busy; 
// @[mshrs.scala:36:7] wire io_req_uop_prs2_busy_0 = io_req_uop_prs2_busy; // @[mshrs.scala:36:7] wire io_req_uop_prs3_busy_0 = io_req_uop_prs3_busy; // @[mshrs.scala:36:7] wire io_req_uop_ppred_busy_0 = io_req_uop_ppred_busy; // @[mshrs.scala:36:7] wire [6:0] io_req_uop_stale_pdst_0 = io_req_uop_stale_pdst; // @[mshrs.scala:36:7] wire io_req_uop_exception_0 = io_req_uop_exception; // @[mshrs.scala:36:7] wire [63:0] io_req_uop_exc_cause_0 = io_req_uop_exc_cause; // @[mshrs.scala:36:7] wire io_req_uop_bypassable_0 = io_req_uop_bypassable; // @[mshrs.scala:36:7] wire [4:0] io_req_uop_mem_cmd_0 = io_req_uop_mem_cmd; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_mem_size_0 = io_req_uop_mem_size; // @[mshrs.scala:36:7] wire io_req_uop_mem_signed_0 = io_req_uop_mem_signed; // @[mshrs.scala:36:7] wire io_req_uop_is_fence_0 = io_req_uop_is_fence; // @[mshrs.scala:36:7] wire io_req_uop_is_fencei_0 = io_req_uop_is_fencei; // @[mshrs.scala:36:7] wire io_req_uop_is_amo_0 = io_req_uop_is_amo; // @[mshrs.scala:36:7] wire io_req_uop_uses_ldq_0 = io_req_uop_uses_ldq; // @[mshrs.scala:36:7] wire io_req_uop_uses_stq_0 = io_req_uop_uses_stq; // @[mshrs.scala:36:7] wire io_req_uop_is_sys_pc2epc_0 = io_req_uop_is_sys_pc2epc; // @[mshrs.scala:36:7] wire io_req_uop_is_unique_0 = io_req_uop_is_unique; // @[mshrs.scala:36:7] wire io_req_uop_flush_on_commit_0 = io_req_uop_flush_on_commit; // @[mshrs.scala:36:7] wire io_req_uop_ldst_is_rs1_0 = io_req_uop_ldst_is_rs1; // @[mshrs.scala:36:7] wire [5:0] io_req_uop_ldst_0 = io_req_uop_ldst; // @[mshrs.scala:36:7] wire [5:0] io_req_uop_lrs1_0 = io_req_uop_lrs1; // @[mshrs.scala:36:7] wire [5:0] io_req_uop_lrs2_0 = io_req_uop_lrs2; // @[mshrs.scala:36:7] wire [5:0] io_req_uop_lrs3_0 = io_req_uop_lrs3; // @[mshrs.scala:36:7] wire io_req_uop_ldst_val_0 = io_req_uop_ldst_val; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_dst_rtype_0 = io_req_uop_dst_rtype; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_lrs1_rtype_0 = io_req_uop_lrs1_rtype; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_lrs2_rtype_0 = io_req_uop_lrs2_rtype; // @[mshrs.scala:36:7] wire io_req_uop_frs3_en_0 = io_req_uop_frs3_en; // @[mshrs.scala:36:7] wire io_req_uop_fp_val_0 = io_req_uop_fp_val; // @[mshrs.scala:36:7] wire io_req_uop_fp_single_0 = io_req_uop_fp_single; // @[mshrs.scala:36:7] wire io_req_uop_xcpt_pf_if_0 = io_req_uop_xcpt_pf_if; // @[mshrs.scala:36:7] wire io_req_uop_xcpt_ae_if_0 = io_req_uop_xcpt_ae_if; // @[mshrs.scala:36:7] wire io_req_uop_xcpt_ma_if_0 = io_req_uop_xcpt_ma_if; // @[mshrs.scala:36:7] wire io_req_uop_bp_debug_if_0 = io_req_uop_bp_debug_if; // @[mshrs.scala:36:7] wire io_req_uop_bp_xcpt_if_0 = io_req_uop_bp_xcpt_if; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_debug_fsrc_0 = io_req_uop_debug_fsrc; // @[mshrs.scala:36:7] wire [1:0] io_req_uop_debug_tsrc_0 = io_req_uop_debug_tsrc; // @[mshrs.scala:36:7] wire [33:0] io_req_addr_0 = io_req_addr; // @[mshrs.scala:36:7] wire [63:0] io_req_data_0 = io_req_data; // @[mshrs.scala:36:7] wire io_req_is_hella_0 = io_req_is_hella; // @[mshrs.scala:36:7] wire io_req_tag_match_0 = io_req_tag_match; // @[mshrs.scala:36:7] wire [1:0] io_req_old_meta_coh_state_0 = io_req_old_meta_coh_state; // @[mshrs.scala:36:7] wire [21:0] io_req_old_meta_tag_0 = io_req_old_meta_tag; // @[mshrs.scala:36:7] wire [1:0] io_req_way_en_0 = io_req_way_en; // @[mshrs.scala:36:7] wire [4:0] io_req_sdq_id_0 = io_req_sdq_id; // @[mshrs.scala:36:7] wire io_req_is_probe_0 = io_req_is_probe; // @[mshrs.scala:36:7] wire io_mem_acquire_ready_0 = io_mem_acquire_ready; // 
@[mshrs.scala:36:7] wire io_mem_grant_valid_0 = io_mem_grant_valid; // @[mshrs.scala:36:7] wire [2:0] io_mem_grant_bits_opcode_0 = io_mem_grant_bits_opcode; // @[mshrs.scala:36:7] wire [1:0] io_mem_grant_bits_param_0 = io_mem_grant_bits_param; // @[mshrs.scala:36:7] wire [3:0] io_mem_grant_bits_size_0 = io_mem_grant_bits_size; // @[mshrs.scala:36:7] wire [3:0] io_mem_grant_bits_source_0 = io_mem_grant_bits_source; // @[mshrs.scala:36:7] wire [2:0] io_mem_grant_bits_sink_0 = io_mem_grant_bits_sink; // @[mshrs.scala:36:7] wire io_mem_grant_bits_denied_0 = io_mem_grant_bits_denied; // @[mshrs.scala:36:7] wire [63:0] io_mem_grant_bits_data_0 = io_mem_grant_bits_data; // @[mshrs.scala:36:7] wire io_mem_grant_bits_corrupt_0 = io_mem_grant_bits_corrupt; // @[mshrs.scala:36:7] wire io_mem_finish_ready_0 = io_mem_finish_ready; // @[mshrs.scala:36:7] wire io_prober_state_valid_0 = io_prober_state_valid; // @[mshrs.scala:36:7] wire [33:0] io_prober_state_bits_0 = io_prober_state_bits; // @[mshrs.scala:36:7] wire io_refill_ready_0 = io_refill_ready; // @[mshrs.scala:36:7] wire io_meta_write_ready_0 = io_meta_write_ready; // @[mshrs.scala:36:7] wire io_meta_read_ready_0 = io_meta_read_ready; // @[mshrs.scala:36:7] wire io_meta_resp_valid_0 = io_meta_resp_valid; // @[mshrs.scala:36:7] wire [1:0] io_meta_resp_bits_coh_state_0 = io_meta_resp_bits_coh_state; // @[mshrs.scala:36:7] wire [21:0] io_meta_resp_bits_tag_0 = io_meta_resp_bits_tag; // @[mshrs.scala:36:7] wire io_wb_req_ready_0 = io_wb_req_ready; // @[mshrs.scala:36:7] wire io_lb_read_ready_0 = io_lb_read_ready; // @[mshrs.scala:36:7] wire [63:0] io_lb_resp_0 = io_lb_resp; // @[mshrs.scala:36:7] wire io_lb_write_ready_0 = io_lb_write_ready; // @[mshrs.scala:36:7] wire io_replay_ready_0 = io_replay_ready; // @[mshrs.scala:36:7] wire io_resp_ready_0 = io_resp_ready; // @[mshrs.scala:36:7] wire io_wb_resp_0 = io_wb_resp; // @[mshrs.scala:36:7] wire _state_T = reset; // @[mshrs.scala:194:11] wire _state_T_26 = reset; // @[mshrs.scala:201:15] wire _state_T_34 = reset; // @[mshrs.scala:194:11] wire _state_T_60 = reset; // @[mshrs.scala:201:15] wire [2:0] io_id = 3'h1; // @[mshrs.scala:36:7] wire [2:0] io_lb_read_bits_id = 3'h1; // @[mshrs.scala:36:7] wire [2:0] io_lb_write_bits_id = 3'h1; // @[mshrs.scala:36:7] wire [3:0] io_brupdate_b1_resolve_mask = 4'h0; // @[mshrs.scala:36:7] wire [3:0] io_brupdate_b1_mispredict_mask = 4'h0; // @[mshrs.scala:36:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type = 4'h0; // @[mshrs.scala:36:7] wire [3:0] io_brupdate_b2_uop_br_mask = 4'h0; // @[mshrs.scala:36:7] wire [3:0] io_brupdate_b2_uop_ftq_idx = 4'h0; // @[mshrs.scala:36:7] wire [3:0] io_brupdate_b2_uop_ldq_idx = 4'h0; // @[mshrs.scala:36:7] wire [3:0] io_brupdate_b2_uop_stq_idx = 4'h0; // @[mshrs.scala:36:7] wire [3:0] io_brupdate_b2_uop_ppred = 4'h0; // @[mshrs.scala:36:7] wire [3:0] _r_T_10 = 4'h0; // @[Metadata.scala:125:10] wire [3:0] _grow_param_r_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _coh_on_grant_T_4 = 4'h0; // @[Metadata.scala:87:10] wire [3:0] _r1_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _r2_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _state_req_needs_wb_r_T_10 = 4'h0; // @[Metadata.scala:125:10] wire [3:0] _state_r_T_16 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _needs_wb_r_T_10 = 4'h0; // @[Metadata.scala:125:10] wire [3:0] _r_T_80 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _r_T_139 = 4'h0; // @[Metadata.scala:68:10] wire [3:0] _state_req_needs_wb_r_T_74 = 4'h0; // @[Metadata.scala:125:10] wire [3:0] _state_r_T_75 = 4'h0; 
// @[Metadata.scala:68:10] wire [6:0] io_brupdate_b2_uop_uopc = 7'h0; // @[mshrs.scala:36:7] wire [6:0] io_brupdate_b2_uop_pdst = 7'h0; // @[mshrs.scala:36:7] wire [6:0] io_brupdate_b2_uop_prs1 = 7'h0; // @[mshrs.scala:36:7] wire [6:0] io_brupdate_b2_uop_prs2 = 7'h0; // @[mshrs.scala:36:7] wire [6:0] io_brupdate_b2_uop_prs3 = 7'h0; // @[mshrs.scala:36:7] wire [6:0] io_brupdate_b2_uop_stale_pdst = 7'h0; // @[mshrs.scala:36:7] wire [6:0] _data_word_T = 7'h0; // @[mshrs.scala:264:32] wire [31:0] io_brupdate_b2_uop_inst = 32'h0; // @[mshrs.scala:36:7] wire [31:0] io_brupdate_b2_uop_debug_inst = 32'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_rvc = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_ctrl_fcn_dw = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_ctrl_is_load = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_ctrl_is_sta = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_ctrl_is_std = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_iw_p1_poisoned = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_iw_p2_poisoned = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_br = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_jalr = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_jal = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_sfb = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_edge_inst = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_taken = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_prs1_busy = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_prs2_busy = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_prs3_busy = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_ppred_busy = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_exception = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_bypassable = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_mem_signed = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_fence = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_fencei = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_amo = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_uses_ldq = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_uses_stq = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_sys_pc2epc = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_is_unique = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_flush_on_commit = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_ldst_is_rs1 = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_ldst_val = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_frs3_en = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_fp_val = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_fp_single = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_xcpt_pf_if = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_xcpt_ae_if = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_xcpt_ma_if = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_bp_debug_if = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_uop_bp_xcpt_if = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_valid = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_mispredict = 1'h0; // @[mshrs.scala:36:7] wire io_brupdate_b2_taken = 1'h0; // @[mshrs.scala:36:7] wire io_exception = 1'h0; // @[mshrs.scala:36:7] wire io_mem_acquire_bits_corrupt = 1'h0; // @[mshrs.scala:36:7] wire _r_T_2 = 1'h0; // @[Metadata.scala:140:24] wire _r_T_4 = 1'h0; // @[Metadata.scala:140:24] wire _r_T_20 = 1'h0; // @[Misc.scala:38:9] wire _r_T_24 = 
1'h0; // @[Misc.scala:38:9] wire _r_T_28 = 1'h0; // @[Misc.scala:38:9] wire _grow_param_r_T_26 = 1'h0; // @[Misc.scala:35:9] wire _grow_param_r_T_29 = 1'h0; // @[Misc.scala:35:9] wire _grow_param_r_T_32 = 1'h0; // @[Misc.scala:35:9] wire _grow_param_r_T_35 = 1'h0; // @[Misc.scala:35:9] wire _grow_param_r_T_38 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_26 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_29 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_32 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_35 = 1'h0; // @[Misc.scala:35:9] wire _r1_T_38 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_26 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_29 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_32 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_35 = 1'h0; // @[Misc.scala:35:9] wire _r2_T_38 = 1'h0; // @[Misc.scala:35:9] wire _state_req_needs_wb_r_T_2 = 1'h0; // @[Metadata.scala:140:24] wire _state_req_needs_wb_r_T_4 = 1'h0; // @[Metadata.scala:140:24] wire _state_req_needs_wb_r_T_20 = 1'h0; // @[Misc.scala:38:9] wire _state_req_needs_wb_r_T_24 = 1'h0; // @[Misc.scala:38:9] wire _state_req_needs_wb_r_T_28 = 1'h0; // @[Misc.scala:38:9] wire _state_r_T_26 = 1'h0; // @[Misc.scala:35:9] wire _state_r_T_29 = 1'h0; // @[Misc.scala:35:9] wire _state_r_T_32 = 1'h0; // @[Misc.scala:35:9] wire _state_r_T_35 = 1'h0; // @[Misc.scala:35:9] wire _state_r_T_38 = 1'h0; // @[Misc.scala:35:9] wire _io_mem_acquire_bits_legal_T = 1'h0; // @[Parameters.scala:684:29] wire _io_mem_acquire_bits_legal_T_6 = 1'h0; // @[Parameters.scala:684:54] wire _io_mem_acquire_bits_legal_T_15 = 1'h0; // @[Parameters.scala:686:26] wire io_mem_acquire_bits_a_corrupt = 1'h0; // @[Edges.scala:346:17] wire io_mem_acquire_bits_a_mask_sub_size = 1'h0; // @[Misc.scala:209:26] wire _io_mem_acquire_bits_a_mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38] wire _io_mem_acquire_bits_a_mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38] wire _io_mem_acquire_bits_a_mask_sub_acc_T_2 = 1'h0; // @[Misc.scala:215:38] wire _io_mem_acquire_bits_a_mask_sub_acc_T_3 = 1'h0; // @[Misc.scala:215:38] wire io_resp_bits_data_doZero = 1'h0; // @[AMOALU.scala:43:31] wire io_resp_bits_data_doZero_1 = 1'h0; // @[AMOALU.scala:43:31] wire io_resp_bits_data_doZero_2 = 1'h0; // @[AMOALU.scala:43:31] wire _needs_wb_r_T_2 = 1'h0; // @[Metadata.scala:140:24] wire _needs_wb_r_T_4 = 1'h0; // @[Metadata.scala:140:24] wire _needs_wb_r_T_20 = 1'h0; // @[Misc.scala:38:9] wire _needs_wb_r_T_24 = 1'h0; // @[Misc.scala:38:9] wire _needs_wb_r_T_28 = 1'h0; // @[Misc.scala:38:9] wire _r_T_90 = 1'h0; // @[Misc.scala:35:9] wire _r_T_93 = 1'h0; // @[Misc.scala:35:9] wire _r_T_96 = 1'h0; // @[Misc.scala:35:9] wire _r_T_99 = 1'h0; // @[Misc.scala:35:9] wire _r_T_102 = 1'h0; // @[Misc.scala:35:9] wire _r_T_149 = 1'h0; // @[Misc.scala:35:9] wire _r_T_152 = 1'h0; // @[Misc.scala:35:9] wire _r_T_155 = 1'h0; // @[Misc.scala:35:9] wire _r_T_158 = 1'h0; // @[Misc.scala:35:9] wire _r_T_161 = 1'h0; // @[Misc.scala:35:9] wire _state_req_needs_wb_r_T_66 = 1'h0; // @[Metadata.scala:140:24] wire _state_req_needs_wb_r_T_68 = 1'h0; // @[Metadata.scala:140:24] wire _state_req_needs_wb_r_T_84 = 1'h0; // @[Misc.scala:38:9] wire _state_req_needs_wb_r_T_88 = 1'h0; // @[Misc.scala:38:9] wire _state_req_needs_wb_r_T_92 = 1'h0; // @[Misc.scala:38:9] wire _state_r_T_85 = 1'h0; // @[Misc.scala:35:9] wire _state_r_T_88 = 1'h0; // @[Misc.scala:35:9] wire _state_r_T_91 = 1'h0; // @[Misc.scala:35:9] wire _state_r_T_94 = 1'h0; // @[Misc.scala:35:9] wire _state_r_T_97 = 1'h0; // @[Misc.scala:35:9] wire [33:0] io_brupdate_b2_uop_debug_pc = 34'h0; // @[mshrs.scala:36:7] 
wire [33:0] io_brupdate_b2_jalr_target = 34'h0; // @[mshrs.scala:36:7] wire [2:0] io_brupdate_b2_uop_iq_type = 3'h0; // @[mshrs.scala:36:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel = 3'h0; // @[mshrs.scala:36:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel = 3'h0; // @[mshrs.scala:36:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd = 3'h0; // @[mshrs.scala:36:7] wire [2:0] io_brupdate_b2_cfi_type = 3'h0; // @[mshrs.scala:36:7] wire [9:0] io_brupdate_b2_uop_fu_code = 10'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_iw_state = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_br_tag = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_rxq_idx = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_mem_size = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_dst_rtype = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_pc_sel = 2'h0; // @[mshrs.scala:36:7] wire [1:0] io_brupdate_b2_target_offset = 2'h0; // @[mshrs.scala:36:7] wire [1:0] new_coh_meta_state = 2'h0; // @[Metadata.scala:160:20] wire [1:0] _r_T_22 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_26 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_30 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_34 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_38 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _grow_param_r_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _grow_param_r_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _grow_param_r_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _grow_param_r_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _coh_on_grant_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _coh_on_grant_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r1_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r1_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r1_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r1_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r2_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r2_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r2_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r2_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _state_req_needs_wb_r_T_22 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_req_needs_wb_r_T_26 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_req_needs_wb_r_T_30 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_req_needs_wb_r_T_34 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_req_needs_wb_r_T_38 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_r_T_1 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _state_r_T_3 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _state_r_T_5 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _state_r_T_15 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] state_new_coh_meta_state = 2'h0; // @[Metadata.scala:160:20] wire [1:0] _needs_wb_r_T_22 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _needs_wb_r_T_26 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _needs_wb_r_T_30 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _needs_wb_r_T_34 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _needs_wb_r_T_38 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _r_T_65 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_67 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_69 
= 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_79 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_124 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_126 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_128 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _r_T_138 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] new_coh_meta_1_state = 2'h0; // @[Metadata.scala:160:20] wire [1:0] _state_req_needs_wb_r_T_86 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_req_needs_wb_r_T_90 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_req_needs_wb_r_T_94 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_req_needs_wb_r_T_98 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_req_needs_wb_r_T_102 = 2'h0; // @[Misc.scala:38:63] wire [1:0] _state_r_T_60 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _state_r_T_62 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _state_r_T_64 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] _state_r_T_74 = 2'h0; // @[Metadata.scala:26:15] wire [1:0] state_new_coh_meta_1_state = 2'h0; // @[Metadata.scala:160:20] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn = 5'h0; // @[mshrs.scala:36:7] wire [4:0] io_brupdate_b2_uop_mem_cmd = 5'h0; // @[mshrs.scala:36:7] wire [5:0] io_brupdate_b2_uop_pc_lob = 6'h0; // @[mshrs.scala:36:7] wire [5:0] io_brupdate_b2_uop_rob_idx = 6'h0; // @[mshrs.scala:36:7] wire [5:0] io_brupdate_b2_uop_ldst = 6'h0; // @[mshrs.scala:36:7] wire [5:0] io_brupdate_b2_uop_lrs1 = 6'h0; // @[mshrs.scala:36:7] wire [5:0] io_brupdate_b2_uop_lrs2 = 6'h0; // @[mshrs.scala:36:7] wire [5:0] io_brupdate_b2_uop_lrs3 = 6'h0; // @[mshrs.scala:36:7] wire [19:0] io_brupdate_b2_uop_imm_packed = 20'h0; // @[mshrs.scala:36:7] wire [11:0] io_brupdate_b2_uop_csr_addr = 12'h0; // @[mshrs.scala:36:7] wire [63:0] io_brupdate_b2_uop_exc_cause = 64'h0; // @[mshrs.scala:36:7] wire [63:0] io_mem_acquire_bits_data = 64'h0; // @[mshrs.scala:36:7] wire [63:0] io_mem_acquire_bits_a_data = 64'h0; // @[Edges.scala:346:17] wire [2:0] io_mem_acquire_bits_opcode = 3'h6; // @[mshrs.scala:36:7] wire [2:0] io_mem_acquire_bits_a_opcode = 3'h6; // @[Edges.scala:346:17] wire [2:0] _io_mem_acquire_bits_a_mask_sizeOH_T = 3'h6; // @[Misc.scala:202:34] wire [3:0] io_mem_acquire_bits_size = 4'h6; // @[mshrs.scala:36:7] wire [3:0] _r_T_12 = 4'h6; // @[Metadata.scala:127:10] wire [3:0] _grow_param_r_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _r1_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _r2_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _state_req_needs_wb_r_T_12 = 4'h6; // @[Metadata.scala:127:10] wire [3:0] _state_r_T_10 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] io_mem_acquire_bits_a_size = 4'h6; // @[Edges.scala:346:17] wire [3:0] _needs_wb_r_T_12 = 4'h6; // @[Metadata.scala:127:10] wire [3:0] _r_T_74 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _r_T_133 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] _state_req_needs_wb_r_T_76 = 4'h6; // @[Metadata.scala:127:10] wire [3:0] _state_r_T_69 = 4'h6; // @[Metadata.scala:64:10] wire [3:0] io_mem_acquire_bits_source = 4'h1; // @[mshrs.scala:36:7] wire [3:0] io_wb_req_bits_source = 4'h1; // @[mshrs.scala:36:7] wire [3:0] _r_T_9 = 4'h1; // @[Metadata.scala:124:10] wire [3:0] _grow_param_r_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _coh_on_grant_T_2 = 4'h1; // @[Metadata.scala:86:10] wire [3:0] _r1_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _r2_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _state_req_needs_wb_r_T_9 = 4'h1; // @[Metadata.scala:124:10] wire [3:0] _state_r_T_6 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] 
io_mem_acquire_bits_a_source = 4'h1; // @[Edges.scala:346:17] wire [3:0] _needs_wb_r_T_9 = 4'h1; // @[Metadata.scala:124:10] wire [3:0] _r_T_70 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _r_T_129 = 4'h1; // @[Metadata.scala:62:10] wire [3:0] _state_req_needs_wb_r_T_73 = 4'h1; // @[Metadata.scala:124:10] wire [3:0] _state_r_T_65 = 4'h1; // @[Metadata.scala:62:10] wire [7:0] io_mem_acquire_bits_mask = 8'hFF; // @[mshrs.scala:36:7] wire [7:0] io_mem_acquire_bits_a_mask = 8'hFF; // @[Edges.scala:346:17] wire [7:0] _io_mem_acquire_bits_a_mask_T = 8'hFF; // @[Misc.scala:222:10] wire io_refill_bits_wmask = 1'h1; // @[mshrs.scala:36:7] wire io_wb_req_bits_voluntary = 1'h1; // @[mshrs.scala:36:7] wire _r_T = 1'h1; // @[Metadata.scala:140:24] wire _state_req_needs_wb_r_T = 1'h1; // @[Metadata.scala:140:24] wire _io_mem_acquire_bits_legal_T_7 = 1'h1; // @[Parameters.scala:91:44] wire _io_mem_acquire_bits_legal_T_8 = 1'h1; // @[Parameters.scala:684:29] wire io_mem_acquire_bits_a_mask_sub_sub_sub_0_1 = 1'h1; // @[Misc.scala:206:21] wire io_mem_acquire_bits_a_mask_sub_sub_size = 1'h1; // @[Misc.scala:209:26] wire io_mem_acquire_bits_a_mask_sub_sub_0_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_sub_1_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_0_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_1_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_2_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_sub_3_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_size = 1'h1; // @[Misc.scala:209:26] wire io_mem_acquire_bits_a_mask_acc = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_1 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_2 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_3 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_4 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_5 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_6 = 1'h1; // @[Misc.scala:215:29] wire io_mem_acquire_bits_a_mask_acc_7 = 1'h1; // @[Misc.scala:215:29] wire _needs_wb_r_T = 1'h1; // @[Metadata.scala:140:24] wire _io_refill_bits_wmask_T = 1'h1; // @[mshrs.scala:342:30] wire _state_req_needs_wb_r_T_64 = 1'h1; // @[Metadata.scala:140:24] wire [21:0] io_meta_write_bits_tag = 22'h0; // @[mshrs.scala:36:7] wire [3:0] _grow_param_r_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _coh_on_grant_T_8 = 4'hC; // @[Metadata.scala:89:10] wire [3:0] _r1_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _r2_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _state_r_T_24 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _r_T_88 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _r_T_147 = 4'hC; // @[Metadata.scala:72:10] wire [3:0] _state_r_T_83 = 4'hC; // @[Metadata.scala:72:10] wire [1:0] _grow_param_r_T_11 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _grow_param_r_T_13 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _grow_param_r_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _grow_param_r_T_23 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _coh_on_grant_T_7 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r1_T_11 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r1_T_13 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r1_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r1_T_23 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r2_T_11 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r2_T_13 = 2'h3; // 
@[Metadata.scala:24:15] wire [1:0] _r2_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r2_T_23 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _dirties_T = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _state_r_T_11 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _state_r_T_13 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _state_r_T_21 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _state_r_T_23 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] io_mem_acquire_bits_a_mask_lo_lo = 2'h3; // @[Misc.scala:222:10] wire [1:0] io_mem_acquire_bits_a_mask_lo_hi = 2'h3; // @[Misc.scala:222:10] wire [1:0] io_mem_acquire_bits_a_mask_hi_lo = 2'h3; // @[Misc.scala:222:10] wire [1:0] io_mem_acquire_bits_a_mask_hi_hi = 2'h3; // @[Misc.scala:222:10] wire [1:0] _r_T_75 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_77 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_85 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_87 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_134 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_136 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_144 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _r_T_146 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _state_r_T_70 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _state_r_T_72 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _state_r_T_80 = 2'h3; // @[Metadata.scala:24:15] wire [1:0] _state_r_T_82 = 2'h3; // @[Metadata.scala:24:15] wire [3:0] _grow_param_r_T_22 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _r1_T_22 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _r2_T_22 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _state_r_T_22 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _r_T_86 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _r_T_145 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _state_r_T_81 = 4'hD; // @[Metadata.scala:71:10] wire [3:0] _r_T_14 = 4'h4; // @[Metadata.scala:129:10] wire [3:0] _grow_param_r_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _coh_on_grant_T_6 = 4'h4; // @[Metadata.scala:88:10] wire [3:0] _r1_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _r2_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _state_req_needs_wb_r_T_14 = 4'h4; // @[Metadata.scala:129:10] wire [3:0] _state_r_T_20 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _io_mem_acquire_bits_a_mask_sizeOH_T_1 = 4'h4; // @[OneHot.scala:65:12] wire [3:0] _needs_wb_r_T_14 = 4'h4; // @[Metadata.scala:129:10] wire [3:0] _r_T_84 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _r_T_143 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _state_req_needs_wb_r_T_78 = 4'h4; // @[Metadata.scala:129:10] wire [3:0] _state_r_T_79 = 4'h4; // @[Metadata.scala:70:10] wire [3:0] _r_T_13 = 4'h5; // @[Metadata.scala:128:10] wire [3:0] _grow_param_r_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _r1_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _r2_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _state_req_needs_wb_r_T_13 = 4'h5; // @[Metadata.scala:128:10] wire [3:0] _state_r_T_18 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _needs_wb_r_T_13 = 4'h5; // @[Metadata.scala:128:10] wire [3:0] _r_T_82 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _r_T_141 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _state_req_needs_wb_r_T_77 = 4'h5; // @[Metadata.scala:128:10] wire [3:0] _state_r_T_77 = 4'h5; // @[Metadata.scala:69:10] wire [3:0] _grow_param_r_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _r1_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _r2_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _state_r_T_14 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] 
_r_T_78 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _r_T_137 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _state_r_T_73 = 4'hE; // @[Metadata.scala:66:10] wire [3:0] _grow_param_r_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _r1_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _r2_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _state_r_T_12 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] io_mem_acquire_bits_a_mask_lo = 4'hF; // @[Misc.scala:222:10] wire [3:0] io_mem_acquire_bits_a_mask_hi = 4'hF; // @[Misc.scala:222:10] wire [3:0] _r_T_76 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _r_T_135 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _state_r_T_71 = 4'hF; // @[Metadata.scala:65:10] wire [3:0] _r_T_11 = 4'h7; // @[Metadata.scala:126:10] wire [3:0] _grow_param_r_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _r1_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _r2_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _state_req_needs_wb_r_T_11 = 4'h7; // @[Metadata.scala:126:10] wire [3:0] _state_r_T_8 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _needs_wb_r_T_11 = 4'h7; // @[Metadata.scala:126:10] wire [3:0] _r_T_72 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _r_T_131 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _state_req_needs_wb_r_T_75 = 4'h7; // @[Metadata.scala:126:10] wire [3:0] _state_r_T_67 = 4'h7; // @[Metadata.scala:63:10] wire [3:0] _r_T_8 = 4'h2; // @[Metadata.scala:123:10] wire [3:0] _grow_param_r_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _r1_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _r2_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _state_req_needs_wb_r_T_8 = 4'h2; // @[Metadata.scala:123:10] wire [3:0] _state_r_T_4 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _needs_wb_r_T_8 = 4'h2; // @[Metadata.scala:123:10] wire [3:0] _r_T_68 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _r_T_127 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _state_req_needs_wb_r_T_72 = 4'h2; // @[Metadata.scala:123:10] wire [3:0] _state_r_T_63 = 4'h2; // @[Metadata.scala:61:10] wire [3:0] _r_T_7 = 4'h3; // @[Metadata.scala:122:10] wire [3:0] _grow_param_r_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _r1_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _r2_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _state_req_needs_wb_r_T_7 = 4'h3; // @[Metadata.scala:122:10] wire [3:0] _state_r_T_2 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _needs_wb_r_T_7 = 4'h3; // @[Metadata.scala:122:10] wire [3:0] _r_T_66 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _r_T_125 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _state_req_needs_wb_r_T_71 = 4'h3; // @[Metadata.scala:122:10] wire [3:0] _state_r_T_61 = 4'h3; // @[Metadata.scala:60:10] wire [3:0] _r_T_18 = 4'h8; // @[Metadata.scala:133:10] wire [3:0] _state_req_needs_wb_r_T_18 = 4'h8; // @[Metadata.scala:133:10] wire [3:0] _needs_wb_r_T_18 = 4'h8; // @[Metadata.scala:133:10] wire [3:0] _state_req_needs_wb_r_T_82 = 4'h8; // @[Metadata.scala:133:10] wire [3:0] _r_T_17 = 4'h9; // @[Metadata.scala:132:10] wire [3:0] _state_req_needs_wb_r_T_17 = 4'h9; // @[Metadata.scala:132:10] wire [3:0] _needs_wb_r_T_17 = 4'h9; // @[Metadata.scala:132:10] wire [3:0] _state_req_needs_wb_r_T_81 = 4'h9; // @[Metadata.scala:132:10] wire [3:0] _r_T_16 = 4'hA; // @[Metadata.scala:131:10] wire [3:0] _state_req_needs_wb_r_T_16 = 4'hA; // @[Metadata.scala:131:10] wire [3:0] _needs_wb_r_T_16 = 4'hA; // @[Metadata.scala:131:10] wire [3:0] _state_req_needs_wb_r_T_80 = 4'hA; // @[Metadata.scala:131:10] wire [3:0] _r_T_15 = 4'hB; // 
@[Metadata.scala:130:10] wire [3:0] _state_req_needs_wb_r_T_15 = 4'hB; // @[Metadata.scala:130:10] wire [3:0] _needs_wb_r_T_15 = 4'hB; // @[Metadata.scala:130:10] wire [3:0] _state_req_needs_wb_r_T_79 = 4'hB; // @[Metadata.scala:130:10] wire [1:0] _r_T_1 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _r_T_3 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _r_T_5 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _state_req_needs_wb_r_T_1 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _state_req_needs_wb_r_T_3 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _state_req_needs_wb_r_T_5 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] io_mem_acquire_bits_a_mask_sizeOH_shiftAmount = 2'h2; // @[OneHot.scala:64:49] wire [1:0] _needs_wb_r_T_1 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _needs_wb_r_T_3 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _needs_wb_r_T_5 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _state_req_needs_wb_r_T_65 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _state_req_needs_wb_r_T_67 = 2'h2; // @[Metadata.scala:140:24] wire [1:0] _state_req_needs_wb_r_T_69 = 2'h2; // @[Metadata.scala:140:24] wire [2:0] io_mem_acquire_bits_a_mask_sizeOH = 3'h5; // @[Misc.scala:202:81] wire [2:0] _io_mem_acquire_bits_a_mask_sizeOH_T_2 = 3'h4; // @[OneHot.scala:65:27] wire [1:0] _grow_param_r_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _grow_param_r_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _grow_param_r_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _grow_param_r_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _coh_on_grant_T_5 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r1_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r1_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r1_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r1_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r2_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r2_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r2_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r2_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _state_r_T_7 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _state_r_T_9 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _state_r_T_17 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _state_r_T_19 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_71 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_73 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_81 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_83 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_130 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_132 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_140 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _r_T_142 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _state_r_T_66 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _state_r_T_68 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _state_r_T_76 = 2'h1; // @[Metadata.scala:25:15] wire [1:0] _state_r_T_78 = 2'h1; // @[Metadata.scala:25:15] wire _io_req_sec_rdy_T; // @[mshrs.scala:159:37] wire _io_idx_valid_T; // @[mshrs.scala:149:25] wire [3:0] req_idx; // @[mshrs.scala:110:25] wire _io_way_valid_T_3; // @[mshrs.scala:151:19] wire _io_tag_valid_T; // @[mshrs.scala:150:25] wire [23:0] req_tag; // @[mshrs.scala:111:26] wire [2:0] io_mem_acquire_bits_a_param; // @[Edges.scala:346:17] wire [31:0] io_mem_acquire_bits_a_address; // @[Edges.scala:346:17] wire [2:0] grantack_bits_e_sink = io_mem_grant_bits_sink_0; // @[Edges.scala:451:17] wire [63:0] io_lb_write_bits_data_0 = io_mem_grant_bits_data_0; // @[mshrs.scala:36:7] wire [2:0] 
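  // Forward declarations: coherence shrink/grow results and the MSHR's output nets, all driven by the logic and registers that follow.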
shrink_param; // @[Misc.scala:38:36] wire [1:0] coh_on_grant_state; // @[Metadata.scala:160:20] wire [63:0] io_refill_bits_data_0 = io_lb_resp_0; // @[mshrs.scala:36:7] wire [63:0] data_word = io_lb_resp_0; // @[mshrs.scala:36:7, :264:26] wire [33:0] _io_replay_bits_addr_T_1; // @[mshrs.scala:353:31] wire [63:0] _io_resp_bits_data_T_23; // @[AMOALU.scala:45:16] wire _io_probe_rdy_T_11; // @[mshrs.scala:148:42] wire io_idx_valid_0; // @[mshrs.scala:36:7] wire [3:0] io_idx_bits_0; // @[mshrs.scala:36:7] wire io_way_valid_0; // @[mshrs.scala:36:7] wire [1:0] io_way_bits_0; // @[mshrs.scala:36:7] wire io_tag_valid_0; // @[mshrs.scala:36:7] wire [23:0] io_tag_bits_0; // @[mshrs.scala:36:7] wire [2:0] io_mem_acquire_bits_param_0; // @[mshrs.scala:36:7] wire [31:0] io_mem_acquire_bits_address_0; // @[mshrs.scala:36:7] wire io_mem_acquire_valid_0; // @[mshrs.scala:36:7] wire io_mem_grant_ready_0; // @[mshrs.scala:36:7] wire [2:0] io_mem_finish_bits_sink_0; // @[mshrs.scala:36:7] wire io_mem_finish_valid_0; // @[mshrs.scala:36:7] wire [1:0] io_refill_bits_way_en_0; // @[mshrs.scala:36:7] wire [9:0] io_refill_bits_addr_0; // @[mshrs.scala:36:7] wire io_refill_valid_0; // @[mshrs.scala:36:7] wire [1:0] io_meta_write_bits_data_coh_state_0; // @[mshrs.scala:36:7] wire [21:0] io_meta_write_bits_data_tag_0; // @[mshrs.scala:36:7] wire [3:0] io_meta_write_bits_idx_0; // @[mshrs.scala:36:7] wire [1:0] io_meta_write_bits_way_en_0; // @[mshrs.scala:36:7] wire io_meta_write_valid_0; // @[mshrs.scala:36:7] wire [3:0] io_meta_read_bits_idx_0; // @[mshrs.scala:36:7] wire [1:0] io_meta_read_bits_way_en_0; // @[mshrs.scala:36:7] wire [21:0] io_meta_read_bits_tag_0; // @[mshrs.scala:36:7] wire io_meta_read_valid_0; // @[mshrs.scala:36:7] wire [21:0] io_wb_req_bits_tag_0; // @[mshrs.scala:36:7] wire [3:0] io_wb_req_bits_idx_0; // @[mshrs.scala:36:7] wire [2:0] io_wb_req_bits_param_0; // @[mshrs.scala:36:7] wire [1:0] io_wb_req_bits_way_en_0; // @[mshrs.scala:36:7] wire io_wb_req_valid_0; // @[mshrs.scala:36:7] wire [1:0] io_commit_coh_state_0; // @[mshrs.scala:36:7] wire [2:0] io_lb_read_bits_offset_0; // @[mshrs.scala:36:7] wire io_lb_read_valid_0; // @[mshrs.scala:36:7] wire [2:0] io_lb_write_bits_offset_0; // @[mshrs.scala:36:7] wire io_lb_write_valid_0; // @[mshrs.scala:36:7] wire [3:0] io_replay_bits_uop_ctrl_br_type_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_ctrl_op1_sel_0; // @[mshrs.scala:36:7] wire [2:0] io_replay_bits_uop_ctrl_op2_sel_0; // @[mshrs.scala:36:7] wire [2:0] io_replay_bits_uop_ctrl_imm_sel_0; // @[mshrs.scala:36:7] wire [4:0] io_replay_bits_uop_ctrl_op_fcn_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_ctrl_fcn_dw_0; // @[mshrs.scala:36:7] wire [2:0] io_replay_bits_uop_ctrl_csr_cmd_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_ctrl_is_load_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_ctrl_is_sta_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_ctrl_is_std_0; // @[mshrs.scala:36:7] wire [6:0] io_replay_bits_uop_uopc_0; // @[mshrs.scala:36:7] wire [31:0] io_replay_bits_uop_inst_0; // @[mshrs.scala:36:7] wire [31:0] io_replay_bits_uop_debug_inst_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_rvc_0; // @[mshrs.scala:36:7] wire [33:0] io_replay_bits_uop_debug_pc_0; // @[mshrs.scala:36:7] wire [2:0] io_replay_bits_uop_iq_type_0; // @[mshrs.scala:36:7] wire [9:0] io_replay_bits_uop_fu_code_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_iw_state_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_iw_p1_poisoned_0; // @[mshrs.scala:36:7] wire 
io_replay_bits_uop_iw_p2_poisoned_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_br_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_jalr_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_jal_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_sfb_0; // @[mshrs.scala:36:7] wire [3:0] io_replay_bits_uop_br_mask_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_br_tag_0; // @[mshrs.scala:36:7] wire [3:0] io_replay_bits_uop_ftq_idx_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_edge_inst_0; // @[mshrs.scala:36:7] wire [5:0] io_replay_bits_uop_pc_lob_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_taken_0; // @[mshrs.scala:36:7] wire [19:0] io_replay_bits_uop_imm_packed_0; // @[mshrs.scala:36:7] wire [11:0] io_replay_bits_uop_csr_addr_0; // @[mshrs.scala:36:7] wire [5:0] io_replay_bits_uop_rob_idx_0; // @[mshrs.scala:36:7] wire [3:0] io_replay_bits_uop_ldq_idx_0; // @[mshrs.scala:36:7] wire [3:0] io_replay_bits_uop_stq_idx_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_rxq_idx_0; // @[mshrs.scala:36:7] wire [6:0] io_replay_bits_uop_pdst_0; // @[mshrs.scala:36:7] wire [6:0] io_replay_bits_uop_prs1_0; // @[mshrs.scala:36:7] wire [6:0] io_replay_bits_uop_prs2_0; // @[mshrs.scala:36:7] wire [6:0] io_replay_bits_uop_prs3_0; // @[mshrs.scala:36:7] wire [3:0] io_replay_bits_uop_ppred_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_prs1_busy_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_prs2_busy_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_prs3_busy_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_ppred_busy_0; // @[mshrs.scala:36:7] wire [6:0] io_replay_bits_uop_stale_pdst_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_exception_0; // @[mshrs.scala:36:7] wire [63:0] io_replay_bits_uop_exc_cause_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_bypassable_0; // @[mshrs.scala:36:7] wire [4:0] io_replay_bits_uop_mem_cmd_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_mem_size_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_mem_signed_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_fence_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_fencei_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_amo_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_uses_ldq_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_uses_stq_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_sys_pc2epc_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_is_unique_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_flush_on_commit_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_ldst_is_rs1_0; // @[mshrs.scala:36:7] wire [5:0] io_replay_bits_uop_ldst_0; // @[mshrs.scala:36:7] wire [5:0] io_replay_bits_uop_lrs1_0; // @[mshrs.scala:36:7] wire [5:0] io_replay_bits_uop_lrs2_0; // @[mshrs.scala:36:7] wire [5:0] io_replay_bits_uop_lrs3_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_ldst_val_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_dst_rtype_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_lrs1_rtype_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_lrs2_rtype_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_frs3_en_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_fp_val_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_fp_single_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_xcpt_pf_if_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_xcpt_ae_if_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_xcpt_ma_if_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_bp_debug_if_0; // @[mshrs.scala:36:7] wire io_replay_bits_uop_bp_xcpt_if_0; // 
@[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_debug_fsrc_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_uop_debug_tsrc_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_old_meta_coh_state_0; // @[mshrs.scala:36:7] wire [21:0] io_replay_bits_old_meta_tag_0; // @[mshrs.scala:36:7] wire [33:0] io_replay_bits_addr_0; // @[mshrs.scala:36:7] wire [63:0] io_replay_bits_data_0; // @[mshrs.scala:36:7] wire io_replay_bits_is_hella_0; // @[mshrs.scala:36:7] wire io_replay_bits_tag_match_0; // @[mshrs.scala:36:7] wire [1:0] io_replay_bits_way_en_0; // @[mshrs.scala:36:7] wire [4:0] io_replay_bits_sdq_id_0; // @[mshrs.scala:36:7] wire io_replay_valid_0; // @[mshrs.scala:36:7] wire [3:0] io_resp_bits_uop_ctrl_br_type_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_ctrl_op1_sel_0; // @[mshrs.scala:36:7] wire [2:0] io_resp_bits_uop_ctrl_op2_sel_0; // @[mshrs.scala:36:7] wire [2:0] io_resp_bits_uop_ctrl_imm_sel_0; // @[mshrs.scala:36:7] wire [4:0] io_resp_bits_uop_ctrl_op_fcn_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_ctrl_fcn_dw_0; // @[mshrs.scala:36:7] wire [2:0] io_resp_bits_uop_ctrl_csr_cmd_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_ctrl_is_load_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_ctrl_is_sta_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_ctrl_is_std_0; // @[mshrs.scala:36:7] wire [6:0] io_resp_bits_uop_uopc_0; // @[mshrs.scala:36:7] wire [31:0] io_resp_bits_uop_inst_0; // @[mshrs.scala:36:7] wire [31:0] io_resp_bits_uop_debug_inst_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_rvc_0; // @[mshrs.scala:36:7] wire [33:0] io_resp_bits_uop_debug_pc_0; // @[mshrs.scala:36:7] wire [2:0] io_resp_bits_uop_iq_type_0; // @[mshrs.scala:36:7] wire [9:0] io_resp_bits_uop_fu_code_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_iw_state_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_iw_p1_poisoned_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_iw_p2_poisoned_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_br_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_jalr_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_jal_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_sfb_0; // @[mshrs.scala:36:7] wire [3:0] io_resp_bits_uop_br_mask_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_br_tag_0; // @[mshrs.scala:36:7] wire [3:0] io_resp_bits_uop_ftq_idx_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_edge_inst_0; // @[mshrs.scala:36:7] wire [5:0] io_resp_bits_uop_pc_lob_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_taken_0; // @[mshrs.scala:36:7] wire [19:0] io_resp_bits_uop_imm_packed_0; // @[mshrs.scala:36:7] wire [11:0] io_resp_bits_uop_csr_addr_0; // @[mshrs.scala:36:7] wire [5:0] io_resp_bits_uop_rob_idx_0; // @[mshrs.scala:36:7] wire [3:0] io_resp_bits_uop_ldq_idx_0; // @[mshrs.scala:36:7] wire [3:0] io_resp_bits_uop_stq_idx_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_rxq_idx_0; // @[mshrs.scala:36:7] wire [6:0] io_resp_bits_uop_pdst_0; // @[mshrs.scala:36:7] wire [6:0] io_resp_bits_uop_prs1_0; // @[mshrs.scala:36:7] wire [6:0] io_resp_bits_uop_prs2_0; // @[mshrs.scala:36:7] wire [6:0] io_resp_bits_uop_prs3_0; // @[mshrs.scala:36:7] wire [3:0] io_resp_bits_uop_ppred_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_prs1_busy_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_prs2_busy_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_prs3_busy_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_ppred_busy_0; // @[mshrs.scala:36:7] wire [6:0] io_resp_bits_uop_stale_pdst_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_exception_0; // @[mshrs.scala:36:7] wire 
[63:0] io_resp_bits_uop_exc_cause_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_bypassable_0; // @[mshrs.scala:36:7] wire [4:0] io_resp_bits_uop_mem_cmd_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_mem_size_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_mem_signed_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_fence_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_fencei_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_amo_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_uses_ldq_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_uses_stq_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_sys_pc2epc_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_is_unique_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_flush_on_commit_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_ldst_is_rs1_0; // @[mshrs.scala:36:7] wire [5:0] io_resp_bits_uop_ldst_0; // @[mshrs.scala:36:7] wire [5:0] io_resp_bits_uop_lrs1_0; // @[mshrs.scala:36:7] wire [5:0] io_resp_bits_uop_lrs2_0; // @[mshrs.scala:36:7] wire [5:0] io_resp_bits_uop_lrs3_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_ldst_val_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_dst_rtype_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_lrs1_rtype_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_lrs2_rtype_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_frs3_en_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_fp_val_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_fp_single_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_xcpt_pf_if_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_xcpt_ae_if_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_xcpt_ma_if_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_bp_debug_if_0; // @[mshrs.scala:36:7] wire io_resp_bits_uop_bp_xcpt_if_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_debug_fsrc_0; // @[mshrs.scala:36:7] wire [1:0] io_resp_bits_uop_debug_tsrc_0; // @[mshrs.scala:36:7] wire [63:0] io_resp_bits_data_0; // @[mshrs.scala:36:7] wire io_resp_bits_is_hella_0; // @[mshrs.scala:36:7] wire io_resp_valid_0; // @[mshrs.scala:36:7] wire io_req_pri_rdy_0; // @[mshrs.scala:36:7] wire io_req_sec_rdy_0; // @[mshrs.scala:36:7] wire io_commit_val_0; // @[mshrs.scala:36:7] wire [33:0] io_commit_addr_0; // @[mshrs.scala:36:7] wire io_probe_rdy_0; // @[mshrs.scala:36:7] reg [4:0] state; // @[mshrs.scala:107:22] reg [6:0] req_uop_uopc; // @[mshrs.scala:109:20] reg [31:0] req_uop_inst; // @[mshrs.scala:109:20] reg [31:0] req_uop_debug_inst; // @[mshrs.scala:109:20] reg req_uop_is_rvc; // @[mshrs.scala:109:20] reg [33:0] req_uop_debug_pc; // @[mshrs.scala:109:20] reg [2:0] req_uop_iq_type; // @[mshrs.scala:109:20] reg [9:0] req_uop_fu_code; // @[mshrs.scala:109:20] reg [3:0] req_uop_ctrl_br_type; // @[mshrs.scala:109:20] reg [1:0] req_uop_ctrl_op1_sel; // @[mshrs.scala:109:20] reg [2:0] req_uop_ctrl_op2_sel; // @[mshrs.scala:109:20] reg [2:0] req_uop_ctrl_imm_sel; // @[mshrs.scala:109:20] reg [4:0] req_uop_ctrl_op_fcn; // @[mshrs.scala:109:20] reg req_uop_ctrl_fcn_dw; // @[mshrs.scala:109:20] reg [2:0] req_uop_ctrl_csr_cmd; // @[mshrs.scala:109:20] reg req_uop_ctrl_is_load; // @[mshrs.scala:109:20] reg req_uop_ctrl_is_sta; // @[mshrs.scala:109:20] reg req_uop_ctrl_is_std; // @[mshrs.scala:109:20] reg [1:0] req_uop_iw_state; // @[mshrs.scala:109:20] reg req_uop_iw_p1_poisoned; // @[mshrs.scala:109:20] reg req_uop_iw_p2_poisoned; // @[mshrs.scala:109:20] reg req_uop_is_br; // @[mshrs.scala:109:20] reg req_uop_is_jalr; // @[mshrs.scala:109:20] reg req_uop_is_jal; // @[mshrs.scala:109:20] reg 
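  // MSHR state register (mshrs.scala:107) and the latched request: micro-op fields, address, data, victim metadata and way enable (mshrs.scala:109).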
req_uop_is_sfb; // @[mshrs.scala:109:20] reg [3:0] req_uop_br_mask; // @[mshrs.scala:109:20] reg [1:0] req_uop_br_tag; // @[mshrs.scala:109:20] reg [3:0] req_uop_ftq_idx; // @[mshrs.scala:109:20] reg req_uop_edge_inst; // @[mshrs.scala:109:20] reg [5:0] req_uop_pc_lob; // @[mshrs.scala:109:20] reg req_uop_taken; // @[mshrs.scala:109:20] reg [19:0] req_uop_imm_packed; // @[mshrs.scala:109:20] reg [11:0] req_uop_csr_addr; // @[mshrs.scala:109:20] reg [5:0] req_uop_rob_idx; // @[mshrs.scala:109:20] reg [3:0] req_uop_ldq_idx; // @[mshrs.scala:109:20] reg [3:0] req_uop_stq_idx; // @[mshrs.scala:109:20] reg [1:0] req_uop_rxq_idx; // @[mshrs.scala:109:20] reg [6:0] req_uop_pdst; // @[mshrs.scala:109:20] reg [6:0] req_uop_prs1; // @[mshrs.scala:109:20] reg [6:0] req_uop_prs2; // @[mshrs.scala:109:20] reg [6:0] req_uop_prs3; // @[mshrs.scala:109:20] reg [3:0] req_uop_ppred; // @[mshrs.scala:109:20] reg req_uop_prs1_busy; // @[mshrs.scala:109:20] reg req_uop_prs2_busy; // @[mshrs.scala:109:20] reg req_uop_prs3_busy; // @[mshrs.scala:109:20] reg req_uop_ppred_busy; // @[mshrs.scala:109:20] reg [6:0] req_uop_stale_pdst; // @[mshrs.scala:109:20] reg req_uop_exception; // @[mshrs.scala:109:20] reg [63:0] req_uop_exc_cause; // @[mshrs.scala:109:20] reg req_uop_bypassable; // @[mshrs.scala:109:20] reg [4:0] req_uop_mem_cmd; // @[mshrs.scala:109:20] reg [1:0] req_uop_mem_size; // @[mshrs.scala:109:20] reg req_uop_mem_signed; // @[mshrs.scala:109:20] reg req_uop_is_fence; // @[mshrs.scala:109:20] reg req_uop_is_fencei; // @[mshrs.scala:109:20] reg req_uop_is_amo; // @[mshrs.scala:109:20] reg req_uop_uses_ldq; // @[mshrs.scala:109:20] reg req_uop_uses_stq; // @[mshrs.scala:109:20] reg req_uop_is_sys_pc2epc; // @[mshrs.scala:109:20] reg req_uop_is_unique; // @[mshrs.scala:109:20] reg req_uop_flush_on_commit; // @[mshrs.scala:109:20] reg req_uop_ldst_is_rs1; // @[mshrs.scala:109:20] reg [5:0] req_uop_ldst; // @[mshrs.scala:109:20] reg [5:0] req_uop_lrs1; // @[mshrs.scala:109:20] reg [5:0] req_uop_lrs2; // @[mshrs.scala:109:20] reg [5:0] req_uop_lrs3; // @[mshrs.scala:109:20] reg req_uop_ldst_val; // @[mshrs.scala:109:20] reg [1:0] req_uop_dst_rtype; // @[mshrs.scala:109:20] reg [1:0] req_uop_lrs1_rtype; // @[mshrs.scala:109:20] reg [1:0] req_uop_lrs2_rtype; // @[mshrs.scala:109:20] reg req_uop_frs3_en; // @[mshrs.scala:109:20] reg req_uop_fp_val; // @[mshrs.scala:109:20] reg req_uop_fp_single; // @[mshrs.scala:109:20] reg req_uop_xcpt_pf_if; // @[mshrs.scala:109:20] reg req_uop_xcpt_ae_if; // @[mshrs.scala:109:20] reg req_uop_xcpt_ma_if; // @[mshrs.scala:109:20] reg req_uop_bp_debug_if; // @[mshrs.scala:109:20] reg req_uop_bp_xcpt_if; // @[mshrs.scala:109:20] reg [1:0] req_uop_debug_fsrc; // @[mshrs.scala:109:20] reg [1:0] req_uop_debug_tsrc; // @[mshrs.scala:109:20] reg [33:0] req_addr; // @[mshrs.scala:109:20] assign io_commit_addr_0 = req_addr; // @[mshrs.scala:36:7, :109:20] reg [63:0] req_data; // @[mshrs.scala:109:20] reg req_is_hella; // @[mshrs.scala:109:20] reg req_tag_match; // @[mshrs.scala:109:20] reg [1:0] req_old_meta_coh_state; // @[mshrs.scala:109:20] reg [21:0] req_old_meta_tag; // @[mshrs.scala:109:20] assign io_wb_req_bits_tag_0 = req_old_meta_tag; // @[mshrs.scala:36:7, :109:20] reg [1:0] req_way_en; // @[mshrs.scala:109:20] assign io_way_bits_0 = req_way_en; // @[mshrs.scala:36:7, :109:20] assign io_refill_bits_way_en_0 = req_way_en; // @[mshrs.scala:36:7, :109:20] assign io_meta_write_bits_way_en_0 = req_way_en; // @[mshrs.scala:36:7, :109:20] assign io_meta_read_bits_way_en_0 = 
req_way_en; // @[mshrs.scala:36:7, :109:20] assign io_wb_req_bits_way_en_0 = req_way_en; // @[mshrs.scala:36:7, :109:20] assign io_replay_bits_way_en_0 = req_way_en; // @[mshrs.scala:36:7, :109:20] reg [4:0] req_sdq_id; // @[mshrs.scala:109:20] assign req_idx = req_addr[9:6]; // @[mshrs.scala:109:20, :110:25] assign io_idx_bits_0 = req_idx; // @[mshrs.scala:36:7, :110:25] assign io_meta_write_bits_idx_0 = req_idx; // @[mshrs.scala:36:7, :110:25] assign io_meta_read_bits_idx_0 = req_idx; // @[mshrs.scala:36:7, :110:25] assign io_wb_req_bits_idx_0 = req_idx; // @[mshrs.scala:36:7, :110:25] assign req_tag = req_addr[33:10]; // @[mshrs.scala:109:20, :111:26] assign io_tag_bits_0 = req_tag; // @[mshrs.scala:36:7, :111:26] wire [27:0] _req_block_addr_T = req_addr[33:6]; // @[mshrs.scala:109:20, :112:34] wire [33:0] req_block_addr = {_req_block_addr_T, 6'h0}; // @[mshrs.scala:112:{34,51}] reg req_needs_wb; // @[mshrs.scala:113:29] reg [1:0] new_coh_state; // @[mshrs.scala:115:24] wire [3:0] _r_T_6 = {2'h2, req_old_meta_coh_state}; // @[Metadata.scala:120:19] wire _r_T_19 = _r_T_6 == 4'h8; // @[Misc.scala:56:20] wire [2:0] _r_T_21 = _r_T_19 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20] wire _r_T_23 = _r_T_6 == 4'h9; // @[Misc.scala:56:20] wire [2:0] _r_T_25 = _r_T_23 ? 3'h2 : _r_T_21; // @[Misc.scala:38:36, :56:20] wire _r_T_27 = _r_T_6 == 4'hA; // @[Misc.scala:56:20] wire [2:0] _r_T_29 = _r_T_27 ? 3'h1 : _r_T_25; // @[Misc.scala:38:36, :56:20] wire _r_T_31 = _r_T_6 == 4'hB; // @[Misc.scala:56:20] wire _r_T_32 = _r_T_31; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_33 = _r_T_31 ? 3'h1 : _r_T_29; // @[Misc.scala:38:36, :56:20] wire _r_T_35 = _r_T_6 == 4'h4; // @[Misc.scala:56:20] wire _r_T_36 = ~_r_T_35 & _r_T_32; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_37 = _r_T_35 ? 3'h5 : _r_T_33; // @[Misc.scala:38:36, :56:20] wire _r_T_39 = _r_T_6 == 4'h5; // @[Misc.scala:56:20] wire _r_T_40 = ~_r_T_39 & _r_T_36; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_41 = _r_T_39 ? 3'h4 : _r_T_37; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_42 = {1'h0, _r_T_39}; // @[Misc.scala:38:63, :56:20] wire _r_T_43 = _r_T_6 == 4'h6; // @[Misc.scala:56:20] wire _r_T_44 = ~_r_T_43 & _r_T_40; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_45 = _r_T_43 ? 3'h0 : _r_T_41; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_46 = _r_T_43 ? 2'h1 : _r_T_42; // @[Misc.scala:38:63, :56:20] wire _r_T_47 = _r_T_6 == 4'h7; // @[Misc.scala:56:20] wire _r_T_48 = _r_T_47 | _r_T_44; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_49 = _r_T_47 ? 3'h0 : _r_T_45; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_50 = _r_T_47 ? 2'h1 : _r_T_46; // @[Misc.scala:38:63, :56:20] wire _r_T_51 = _r_T_6 == 4'h0; // @[Misc.scala:56:20] wire _r_T_52 = ~_r_T_51 & _r_T_48; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_53 = _r_T_51 ? 3'h5 : _r_T_49; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_54 = _r_T_51 ? 2'h0 : _r_T_50; // @[Misc.scala:38:63, :56:20] wire _r_T_55 = _r_T_6 == 4'h1; // @[Misc.scala:56:20] wire _r_T_56 = ~_r_T_55 & _r_T_52; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_57 = _r_T_55 ? 3'h4 : _r_T_53; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_58 = _r_T_55 ? 2'h1 : _r_T_54; // @[Misc.scala:38:63, :56:20] wire _r_T_59 = _r_T_6 == 4'h2; // @[Misc.scala:56:20] wire _r_T_60 = ~_r_T_59 & _r_T_56; // @[Misc.scala:38:9, :56:20] wire [2:0] _r_T_61 = _r_T_59 ? 3'h3 : _r_T_57; // @[Misc.scala:38:36, :56:20] wire [1:0] _r_T_62 = _r_T_59 ? 
2'h2 : _r_T_58; // @[Misc.scala:38:63, :56:20] wire _r_T_63 = _r_T_6 == 4'h3; // @[Misc.scala:56:20] wire r_1 = _r_T_63 | _r_T_60; // @[Misc.scala:38:9, :56:20] assign shrink_param = _r_T_63 ? 3'h3 : _r_T_61; // @[Misc.scala:38:36, :56:20] assign io_wb_req_bits_param_0 = shrink_param; // @[Misc.scala:38:36] wire [1:0] r_3 = _r_T_63 ? 2'h2 : _r_T_62; // @[Misc.scala:38:63, :56:20] wire [1:0] coh_on_clear_state = r_3; // @[Misc.scala:38:63] wire _GEN = req_uop_mem_cmd == 5'h1; // @[Consts.scala:90:32] wire _grow_param_r_c_cat_T; // @[Consts.scala:90:32] assign _grow_param_r_c_cat_T = _GEN; // @[Consts.scala:90:32] wire _grow_param_r_c_cat_T_23; // @[Consts.scala:90:32] assign _grow_param_r_c_cat_T_23 = _GEN; // @[Consts.scala:90:32] wire _coh_on_grant_c_cat_T; // @[Consts.scala:90:32] assign _coh_on_grant_c_cat_T = _GEN; // @[Consts.scala:90:32] wire _coh_on_grant_c_cat_T_23; // @[Consts.scala:90:32] assign _coh_on_grant_c_cat_T_23 = _GEN; // @[Consts.scala:90:32] wire _r1_c_cat_T; // @[Consts.scala:90:32] assign _r1_c_cat_T = _GEN; // @[Consts.scala:90:32] wire _r1_c_cat_T_23; // @[Consts.scala:90:32] assign _r1_c_cat_T_23 = _GEN; // @[Consts.scala:90:32] wire _needs_second_acq_T_27; // @[Consts.scala:90:32] assign _needs_second_acq_T_27 = _GEN; // @[Consts.scala:90:32] wire _GEN_0 = req_uop_mem_cmd == 5'h11; // @[Consts.scala:90:49] wire _grow_param_r_c_cat_T_1; // @[Consts.scala:90:49] assign _grow_param_r_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49] wire _grow_param_r_c_cat_T_24; // @[Consts.scala:90:49] assign _grow_param_r_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49] wire _coh_on_grant_c_cat_T_1; // @[Consts.scala:90:49] assign _coh_on_grant_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49] wire _coh_on_grant_c_cat_T_24; // @[Consts.scala:90:49] assign _coh_on_grant_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49] wire _r1_c_cat_T_1; // @[Consts.scala:90:49] assign _r1_c_cat_T_1 = _GEN_0; // @[Consts.scala:90:49] wire _r1_c_cat_T_24; // @[Consts.scala:90:49] assign _r1_c_cat_T_24 = _GEN_0; // @[Consts.scala:90:49] wire _needs_second_acq_T_28; // @[Consts.scala:90:49] assign _needs_second_acq_T_28 = _GEN_0; // @[Consts.scala:90:49] wire _grow_param_r_c_cat_T_2 = _grow_param_r_c_cat_T | _grow_param_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _GEN_1 = req_uop_mem_cmd == 5'h7; // @[Consts.scala:90:66] wire _grow_param_r_c_cat_T_3; // @[Consts.scala:90:66] assign _grow_param_r_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66] wire _grow_param_r_c_cat_T_26; // @[Consts.scala:90:66] assign _grow_param_r_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66] wire _coh_on_grant_c_cat_T_3; // @[Consts.scala:90:66] assign _coh_on_grant_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66] wire _coh_on_grant_c_cat_T_26; // @[Consts.scala:90:66] assign _coh_on_grant_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66] wire _r1_c_cat_T_3; // @[Consts.scala:90:66] assign _r1_c_cat_T_3 = _GEN_1; // @[Consts.scala:90:66] wire _r1_c_cat_T_26; // @[Consts.scala:90:66] assign _r1_c_cat_T_26 = _GEN_1; // @[Consts.scala:90:66] wire _needs_second_acq_T_30; // @[Consts.scala:90:66] assign _needs_second_acq_T_30 = _GEN_1; // @[Consts.scala:90:66] wire _grow_param_r_c_cat_T_4 = _grow_param_r_c_cat_T_2 | _grow_param_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _GEN_2 = req_uop_mem_cmd == 5'h4; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_5; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_5 = _GEN_2; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_28; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_28 = 
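  // _GEN_* terms decode req_uop_mem_cmd against the memory-command encodings (Consts.scala:90-91, package.scala:16); the OR-chains below classify the stored request for the grow_param / coh_on_grant helpers.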
_GEN_2; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_5; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_5 = _GEN_2; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_28; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_28 = _GEN_2; // @[package.scala:16:47] wire _r1_c_cat_T_5; // @[package.scala:16:47] assign _r1_c_cat_T_5 = _GEN_2; // @[package.scala:16:47] wire _r1_c_cat_T_28; // @[package.scala:16:47] assign _r1_c_cat_T_28 = _GEN_2; // @[package.scala:16:47] wire _needs_second_acq_T_32; // @[package.scala:16:47] assign _needs_second_acq_T_32 = _GEN_2; // @[package.scala:16:47] wire _GEN_3 = req_uop_mem_cmd == 5'h9; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_6; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_6 = _GEN_3; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_29; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_29 = _GEN_3; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_6; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_6 = _GEN_3; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_29; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_29 = _GEN_3; // @[package.scala:16:47] wire _r1_c_cat_T_6; // @[package.scala:16:47] assign _r1_c_cat_T_6 = _GEN_3; // @[package.scala:16:47] wire _r1_c_cat_T_29; // @[package.scala:16:47] assign _r1_c_cat_T_29 = _GEN_3; // @[package.scala:16:47] wire _needs_second_acq_T_33; // @[package.scala:16:47] assign _needs_second_acq_T_33 = _GEN_3; // @[package.scala:16:47] wire _GEN_4 = req_uop_mem_cmd == 5'hA; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_7; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_7 = _GEN_4; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_30; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_30 = _GEN_4; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_7; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_7 = _GEN_4; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_30; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_30 = _GEN_4; // @[package.scala:16:47] wire _r1_c_cat_T_7; // @[package.scala:16:47] assign _r1_c_cat_T_7 = _GEN_4; // @[package.scala:16:47] wire _r1_c_cat_T_30; // @[package.scala:16:47] assign _r1_c_cat_T_30 = _GEN_4; // @[package.scala:16:47] wire _needs_second_acq_T_34; // @[package.scala:16:47] assign _needs_second_acq_T_34 = _GEN_4; // @[package.scala:16:47] wire _GEN_5 = req_uop_mem_cmd == 5'hB; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_8; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_8 = _GEN_5; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_31; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_31 = _GEN_5; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_8; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_8 = _GEN_5; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_31; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_31 = _GEN_5; // @[package.scala:16:47] wire _r1_c_cat_T_8; // @[package.scala:16:47] assign _r1_c_cat_T_8 = _GEN_5; // @[package.scala:16:47] wire _r1_c_cat_T_31; // @[package.scala:16:47] assign _r1_c_cat_T_31 = _GEN_5; // @[package.scala:16:47] wire _needs_second_acq_T_35; // @[package.scala:16:47] assign _needs_second_acq_T_35 = _GEN_5; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_9 = _grow_param_r_c_cat_T_5 | _grow_param_r_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_10 = _grow_param_r_c_cat_T_9 | _grow_param_r_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_11 = 
_grow_param_r_c_cat_T_10 | _grow_param_r_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _GEN_6 = req_uop_mem_cmd == 5'h8; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_12; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_12 = _GEN_6; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_35; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_35 = _GEN_6; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_12; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_12 = _GEN_6; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_35; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_35 = _GEN_6; // @[package.scala:16:47] wire _r1_c_cat_T_12; // @[package.scala:16:47] assign _r1_c_cat_T_12 = _GEN_6; // @[package.scala:16:47] wire _r1_c_cat_T_35; // @[package.scala:16:47] assign _r1_c_cat_T_35 = _GEN_6; // @[package.scala:16:47] wire _needs_second_acq_T_39; // @[package.scala:16:47] assign _needs_second_acq_T_39 = _GEN_6; // @[package.scala:16:47] wire _GEN_7 = req_uop_mem_cmd == 5'hC; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_13; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_13 = _GEN_7; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_36; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_36 = _GEN_7; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_13; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_13 = _GEN_7; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_36; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_36 = _GEN_7; // @[package.scala:16:47] wire _r1_c_cat_T_13; // @[package.scala:16:47] assign _r1_c_cat_T_13 = _GEN_7; // @[package.scala:16:47] wire _r1_c_cat_T_36; // @[package.scala:16:47] assign _r1_c_cat_T_36 = _GEN_7; // @[package.scala:16:47] wire _needs_second_acq_T_40; // @[package.scala:16:47] assign _needs_second_acq_T_40 = _GEN_7; // @[package.scala:16:47] wire _GEN_8 = req_uop_mem_cmd == 5'hD; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_14; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_14 = _GEN_8; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_37; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_37 = _GEN_8; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_14; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_14 = _GEN_8; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_37; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_37 = _GEN_8; // @[package.scala:16:47] wire _r1_c_cat_T_14; // @[package.scala:16:47] assign _r1_c_cat_T_14 = _GEN_8; // @[package.scala:16:47] wire _r1_c_cat_T_37; // @[package.scala:16:47] assign _r1_c_cat_T_37 = _GEN_8; // @[package.scala:16:47] wire _needs_second_acq_T_41; // @[package.scala:16:47] assign _needs_second_acq_T_41 = _GEN_8; // @[package.scala:16:47] wire _GEN_9 = req_uop_mem_cmd == 5'hE; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_15; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_15 = _GEN_9; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_38; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_38 = _GEN_9; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_15; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_15 = _GEN_9; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_38; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_38 = _GEN_9; // @[package.scala:16:47] wire _r1_c_cat_T_15; // @[package.scala:16:47] assign _r1_c_cat_T_15 = _GEN_9; // @[package.scala:16:47] wire _r1_c_cat_T_38; // @[package.scala:16:47] assign _r1_c_cat_T_38 = _GEN_9; // @[package.scala:16:47] wire 
_needs_second_acq_T_42; // @[package.scala:16:47] assign _needs_second_acq_T_42 = _GEN_9; // @[package.scala:16:47] wire _GEN_10 = req_uop_mem_cmd == 5'hF; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_16; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_16 = _GEN_10; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_39; // @[package.scala:16:47] assign _grow_param_r_c_cat_T_39 = _GEN_10; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_16; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_16 = _GEN_10; // @[package.scala:16:47] wire _coh_on_grant_c_cat_T_39; // @[package.scala:16:47] assign _coh_on_grant_c_cat_T_39 = _GEN_10; // @[package.scala:16:47] wire _r1_c_cat_T_16; // @[package.scala:16:47] assign _r1_c_cat_T_16 = _GEN_10; // @[package.scala:16:47] wire _r1_c_cat_T_39; // @[package.scala:16:47] assign _r1_c_cat_T_39 = _GEN_10; // @[package.scala:16:47] wire _needs_second_acq_T_43; // @[package.scala:16:47] assign _needs_second_acq_T_43 = _GEN_10; // @[package.scala:16:47] wire _grow_param_r_c_cat_T_17 = _grow_param_r_c_cat_T_12 | _grow_param_r_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_18 = _grow_param_r_c_cat_T_17 | _grow_param_r_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_19 = _grow_param_r_c_cat_T_18 | _grow_param_r_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_20 = _grow_param_r_c_cat_T_19 | _grow_param_r_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_21 = _grow_param_r_c_cat_T_11 | _grow_param_r_c_cat_T_20; // @[package.scala:81:59] wire _grow_param_r_c_cat_T_22 = _grow_param_r_c_cat_T_4 | _grow_param_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _grow_param_r_c_cat_T_25 = _grow_param_r_c_cat_T_23 | _grow_param_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _grow_param_r_c_cat_T_27 = _grow_param_r_c_cat_T_25 | _grow_param_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _grow_param_r_c_cat_T_32 = _grow_param_r_c_cat_T_28 | _grow_param_r_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_33 = _grow_param_r_c_cat_T_32 | _grow_param_r_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_34 = _grow_param_r_c_cat_T_33 | _grow_param_r_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_40 = _grow_param_r_c_cat_T_35 | _grow_param_r_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_41 = _grow_param_r_c_cat_T_40 | _grow_param_r_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_42 = _grow_param_r_c_cat_T_41 | _grow_param_r_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_43 = _grow_param_r_c_cat_T_42 | _grow_param_r_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _grow_param_r_c_cat_T_44 = _grow_param_r_c_cat_T_34 | _grow_param_r_c_cat_T_43; // @[package.scala:81:59] wire _grow_param_r_c_cat_T_45 = _grow_param_r_c_cat_T_27 | _grow_param_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _GEN_11 = req_uop_mem_cmd == 5'h3; // @[Consts.scala:91:54] wire _grow_param_r_c_cat_T_46; // @[Consts.scala:91:54] assign _grow_param_r_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54] wire _coh_on_grant_c_cat_T_46; // @[Consts.scala:91:54] assign _coh_on_grant_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54] wire _r1_c_cat_T_46; // @[Consts.scala:91:54] assign _r1_c_cat_T_46 = _GEN_11; // @[Consts.scala:91:54] wire _needs_second_acq_T_50; // @[Consts.scala:91:54] assign _needs_second_acq_T_50 = _GEN_11; // 
@[Consts.scala:91:54] wire _grow_param_r_c_cat_T_47 = _grow_param_r_c_cat_T_45 | _grow_param_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _GEN_12 = req_uop_mem_cmd == 5'h6; // @[Consts.scala:91:71] wire _grow_param_r_c_cat_T_48; // @[Consts.scala:91:71] assign _grow_param_r_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71] wire _coh_on_grant_c_cat_T_48; // @[Consts.scala:91:71] assign _coh_on_grant_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71] wire _r1_c_cat_T_48; // @[Consts.scala:91:71] assign _r1_c_cat_T_48 = _GEN_12; // @[Consts.scala:91:71] wire _needs_second_acq_T_52; // @[Consts.scala:91:71] assign _needs_second_acq_T_52 = _GEN_12; // @[Consts.scala:91:71] wire _grow_param_r_c_cat_T_49 = _grow_param_r_c_cat_T_47 | _grow_param_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] grow_param_r_c = {_grow_param_r_c_cat_T_22, _grow_param_r_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _grow_param_r_T = {grow_param_r_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19] wire _grow_param_r_T_25 = _grow_param_r_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_27 = {1'h0, _grow_param_r_T_25}; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_28 = _grow_param_r_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_30 = _grow_param_r_T_28 ? 2'h2 : _grow_param_r_T_27; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_31 = _grow_param_r_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_33 = _grow_param_r_T_31 ? 2'h1 : _grow_param_r_T_30; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_34 = _grow_param_r_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_36 = _grow_param_r_T_34 ? 2'h2 : _grow_param_r_T_33; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_37 = _grow_param_r_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _grow_param_r_T_39 = _grow_param_r_T_37 ? 2'h0 : _grow_param_r_T_36; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_40 = _grow_param_r_T == 4'hE; // @[Misc.scala:49:20] wire _grow_param_r_T_41 = _grow_param_r_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_42 = _grow_param_r_T_40 ? 2'h3 : _grow_param_r_T_39; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_43 = &_grow_param_r_T; // @[Misc.scala:49:20] wire _grow_param_r_T_44 = _grow_param_r_T_43 | _grow_param_r_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_45 = _grow_param_r_T_43 ? 2'h3 : _grow_param_r_T_42; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_46 = _grow_param_r_T == 4'h6; // @[Misc.scala:49:20] wire _grow_param_r_T_47 = _grow_param_r_T_46 | _grow_param_r_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_48 = _grow_param_r_T_46 ? 2'h2 : _grow_param_r_T_45; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_49 = _grow_param_r_T == 4'h7; // @[Misc.scala:49:20] wire _grow_param_r_T_50 = _grow_param_r_T_49 | _grow_param_r_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_51 = _grow_param_r_T_49 ? 2'h3 : _grow_param_r_T_48; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_52 = _grow_param_r_T == 4'h1; // @[Misc.scala:49:20] wire _grow_param_r_T_53 = _grow_param_r_T_52 | _grow_param_r_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_54 = _grow_param_r_T_52 ? 2'h1 : _grow_param_r_T_51; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_55 = _grow_param_r_T == 4'h2; // @[Misc.scala:49:20] wire _grow_param_r_T_56 = _grow_param_r_T_55 | _grow_param_r_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _grow_param_r_T_57 = _grow_param_r_T_55 ? 
2'h2 : _grow_param_r_T_54; // @[Misc.scala:35:36, :49:20] wire _grow_param_r_T_58 = _grow_param_r_T == 4'h3; // @[Misc.scala:49:20] wire grow_param_r_1 = _grow_param_r_T_58 | _grow_param_r_T_56; // @[Misc.scala:35:9, :49:20] wire [1:0] grow_param = _grow_param_r_T_58 ? 2'h3 : _grow_param_r_T_57; // @[Misc.scala:35:36, :49:20] wire [1:0] grow_param_meta_state = grow_param; // @[Misc.scala:35:36] wire _coh_on_grant_c_cat_T_2 = _coh_on_grant_c_cat_T | _coh_on_grant_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _coh_on_grant_c_cat_T_4 = _coh_on_grant_c_cat_T_2 | _coh_on_grant_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _coh_on_grant_c_cat_T_9 = _coh_on_grant_c_cat_T_5 | _coh_on_grant_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_10 = _coh_on_grant_c_cat_T_9 | _coh_on_grant_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_11 = _coh_on_grant_c_cat_T_10 | _coh_on_grant_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_17 = _coh_on_grant_c_cat_T_12 | _coh_on_grant_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_18 = _coh_on_grant_c_cat_T_17 | _coh_on_grant_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_19 = _coh_on_grant_c_cat_T_18 | _coh_on_grant_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_20 = _coh_on_grant_c_cat_T_19 | _coh_on_grant_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_21 = _coh_on_grant_c_cat_T_11 | _coh_on_grant_c_cat_T_20; // @[package.scala:81:59] wire _coh_on_grant_c_cat_T_22 = _coh_on_grant_c_cat_T_4 | _coh_on_grant_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _coh_on_grant_c_cat_T_25 = _coh_on_grant_c_cat_T_23 | _coh_on_grant_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _coh_on_grant_c_cat_T_27 = _coh_on_grant_c_cat_T_25 | _coh_on_grant_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _coh_on_grant_c_cat_T_32 = _coh_on_grant_c_cat_T_28 | _coh_on_grant_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_33 = _coh_on_grant_c_cat_T_32 | _coh_on_grant_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_34 = _coh_on_grant_c_cat_T_33 | _coh_on_grant_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_40 = _coh_on_grant_c_cat_T_35 | _coh_on_grant_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_41 = _coh_on_grant_c_cat_T_40 | _coh_on_grant_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_42 = _coh_on_grant_c_cat_T_41 | _coh_on_grant_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_43 = _coh_on_grant_c_cat_T_42 | _coh_on_grant_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _coh_on_grant_c_cat_T_44 = _coh_on_grant_c_cat_T_34 | _coh_on_grant_c_cat_T_43; // @[package.scala:81:59] wire _coh_on_grant_c_cat_T_45 = _coh_on_grant_c_cat_T_27 | _coh_on_grant_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _coh_on_grant_c_cat_T_47 = _coh_on_grant_c_cat_T_45 | _coh_on_grant_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _coh_on_grant_c_cat_T_49 = _coh_on_grant_c_cat_T_47 | _coh_on_grant_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] coh_on_grant_c = {_coh_on_grant_c_cat_T_22, _coh_on_grant_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _coh_on_grant_T = {coh_on_grant_c, io_mem_grant_bits_param_0}; // @[Metadata.scala:29:18, :84:18] wire _coh_on_grant_T_9 = _coh_on_grant_T == 4'h1; // @[Metadata.scala:84:{18,38}] wire [1:0] 
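  // coh_on_grant: next metadata state selected from the Grant's param (Metadata.scala:84); it is exported on io_commit_coh_state.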
_coh_on_grant_T_10 = {1'h0, _coh_on_grant_T_9}; // @[Metadata.scala:84:38] wire _coh_on_grant_T_11 = _coh_on_grant_T == 4'h0; // @[Metadata.scala:84:{18,38}] wire [1:0] _coh_on_grant_T_12 = _coh_on_grant_T_11 ? 2'h2 : _coh_on_grant_T_10; // @[Metadata.scala:84:38] wire _coh_on_grant_T_13 = _coh_on_grant_T == 4'h4; // @[Metadata.scala:84:{18,38}] wire [1:0] _coh_on_grant_T_14 = _coh_on_grant_T_13 ? 2'h2 : _coh_on_grant_T_12; // @[Metadata.scala:84:38] wire _coh_on_grant_T_15 = _coh_on_grant_T == 4'hC; // @[Metadata.scala:84:{18,38}] wire [1:0] _coh_on_grant_T_16 = _coh_on_grant_T_15 ? 2'h3 : _coh_on_grant_T_14; // @[Metadata.scala:84:38] assign coh_on_grant_state = _coh_on_grant_T_16; // @[Metadata.scala:84:38, :160:20] assign io_commit_coh_state_0 = coh_on_grant_state; // @[Metadata.scala:160:20] wire _r1_c_cat_T_2 = _r1_c_cat_T | _r1_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _r1_c_cat_T_4 = _r1_c_cat_T_2 | _r1_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _r1_c_cat_T_9 = _r1_c_cat_T_5 | _r1_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_10 = _r1_c_cat_T_9 | _r1_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_11 = _r1_c_cat_T_10 | _r1_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_17 = _r1_c_cat_T_12 | _r1_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_18 = _r1_c_cat_T_17 | _r1_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_19 = _r1_c_cat_T_18 | _r1_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_20 = _r1_c_cat_T_19 | _r1_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_21 = _r1_c_cat_T_11 | _r1_c_cat_T_20; // @[package.scala:81:59] wire _r1_c_cat_T_22 = _r1_c_cat_T_4 | _r1_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _r1_c_cat_T_25 = _r1_c_cat_T_23 | _r1_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _r1_c_cat_T_27 = _r1_c_cat_T_25 | _r1_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _r1_c_cat_T_32 = _r1_c_cat_T_28 | _r1_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_33 = _r1_c_cat_T_32 | _r1_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_34 = _r1_c_cat_T_33 | _r1_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_40 = _r1_c_cat_T_35 | _r1_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_41 = _r1_c_cat_T_40 | _r1_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_42 = _r1_c_cat_T_41 | _r1_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_43 = _r1_c_cat_T_42 | _r1_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _r1_c_cat_T_44 = _r1_c_cat_T_34 | _r1_c_cat_T_43; // @[package.scala:81:59] wire _r1_c_cat_T_45 = _r1_c_cat_T_27 | _r1_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _r1_c_cat_T_47 = _r1_c_cat_T_45 | _r1_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _r1_c_cat_T_49 = _r1_c_cat_T_47 | _r1_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] r1_c = {_r1_c_cat_T_22, _r1_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _r1_T = {r1_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19] wire _r1_T_25 = _r1_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _r1_T_27 = {1'h0, _r1_T_25}; // @[Misc.scala:35:36, :49:20] wire _r1_T_28 = _r1_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _r1_T_30 = _r1_T_28 ? 2'h2 : _r1_T_27; // @[Misc.scala:35:36, :49:20] wire _r1_T_31 = _r1_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _r1_T_33 = _r1_T_31 ? 
2'h1 : _r1_T_30; // @[Misc.scala:35:36, :49:20] wire _r1_T_34 = _r1_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _r1_T_36 = _r1_T_34 ? 2'h2 : _r1_T_33; // @[Misc.scala:35:36, :49:20] wire _r1_T_37 = _r1_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _r1_T_39 = _r1_T_37 ? 2'h0 : _r1_T_36; // @[Misc.scala:35:36, :49:20] wire _r1_T_40 = _r1_T == 4'hE; // @[Misc.scala:49:20] wire _r1_T_41 = _r1_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_42 = _r1_T_40 ? 2'h3 : _r1_T_39; // @[Misc.scala:35:36, :49:20] wire _r1_T_43 = &_r1_T; // @[Misc.scala:49:20] wire _r1_T_44 = _r1_T_43 | _r1_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_45 = _r1_T_43 ? 2'h3 : _r1_T_42; // @[Misc.scala:35:36, :49:20] wire _r1_T_46 = _r1_T == 4'h6; // @[Misc.scala:49:20] wire _r1_T_47 = _r1_T_46 | _r1_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_48 = _r1_T_46 ? 2'h2 : _r1_T_45; // @[Misc.scala:35:36, :49:20] wire _r1_T_49 = _r1_T == 4'h7; // @[Misc.scala:49:20] wire _r1_T_50 = _r1_T_49 | _r1_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_51 = _r1_T_49 ? 2'h3 : _r1_T_48; // @[Misc.scala:35:36, :49:20] wire _r1_T_52 = _r1_T == 4'h1; // @[Misc.scala:49:20] wire _r1_T_53 = _r1_T_52 | _r1_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_54 = _r1_T_52 ? 2'h1 : _r1_T_51; // @[Misc.scala:35:36, :49:20] wire _r1_T_55 = _r1_T == 4'h2; // @[Misc.scala:49:20] wire _r1_T_56 = _r1_T_55 | _r1_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _r1_T_57 = _r1_T_55 ? 2'h2 : _r1_T_54; // @[Misc.scala:35:36, :49:20] wire _r1_T_58 = _r1_T == 4'h3; // @[Misc.scala:49:20] wire r1_1 = _r1_T_58 | _r1_T_56; // @[Misc.scala:35:9, :49:20] wire [1:0] r1_2 = _r1_T_58 ? 2'h3 : _r1_T_57; // @[Misc.scala:35:36, :49:20] wire _GEN_13 = io_req_uop_mem_cmd_0 == 5'h1; // @[Consts.scala:90:32] wire _r2_c_cat_T; // @[Consts.scala:90:32] assign _r2_c_cat_T = _GEN_13; // @[Consts.scala:90:32] wire _r2_c_cat_T_23; // @[Consts.scala:90:32] assign _r2_c_cat_T_23 = _GEN_13; // @[Consts.scala:90:32] wire _needs_second_acq_T; // @[Consts.scala:90:32] assign _needs_second_acq_T = _GEN_13; // @[Consts.scala:90:32] wire _dirties_cat_T; // @[Consts.scala:90:32] assign _dirties_cat_T = _GEN_13; // @[Consts.scala:90:32] wire _dirties_cat_T_23; // @[Consts.scala:90:32] assign _dirties_cat_T_23 = _GEN_13; // @[Consts.scala:90:32] wire _state_r_c_cat_T; // @[Consts.scala:90:32] assign _state_r_c_cat_T = _GEN_13; // @[Consts.scala:90:32] wire _state_r_c_cat_T_23; // @[Consts.scala:90:32] assign _state_r_c_cat_T_23 = _GEN_13; // @[Consts.scala:90:32] wire _state_T_3; // @[Consts.scala:90:32] assign _state_T_3 = _GEN_13; // @[Consts.scala:90:32] wire _r_c_cat_T_50; // @[Consts.scala:90:32] assign _r_c_cat_T_50 = _GEN_13; // @[Consts.scala:90:32] wire _r_c_cat_T_73; // @[Consts.scala:90:32] assign _r_c_cat_T_73 = _GEN_13; // @[Consts.scala:90:32] wire _state_r_c_cat_T_50; // @[Consts.scala:90:32] assign _state_r_c_cat_T_50 = _GEN_13; // @[Consts.scala:90:32] wire _state_r_c_cat_T_73; // @[Consts.scala:90:32] assign _state_r_c_cat_T_73 = _GEN_13; // @[Consts.scala:90:32] wire _state_T_37; // @[Consts.scala:90:32] assign _state_T_37 = _GEN_13; // @[Consts.scala:90:32] wire _GEN_14 = io_req_uop_mem_cmd_0 == 5'h11; // @[Consts.scala:90:49] wire _r2_c_cat_T_1; // @[Consts.scala:90:49] assign _r2_c_cat_T_1 = _GEN_14; // @[Consts.scala:90:49] wire _r2_c_cat_T_24; // @[Consts.scala:90:49] assign _r2_c_cat_T_24 = _GEN_14; // @[Consts.scala:90:49] wire _needs_second_acq_T_1; // @[Consts.scala:90:49] assign _needs_second_acq_T_1 = _GEN_14; // @[Consts.scala:90:49] wire 
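  // Decode of the incoming request's mem_cmd (io_req_uop_mem_cmd), reused by the needs_second_acq / dirties / state_r checks below.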
_dirties_cat_T_1; // @[Consts.scala:90:49] assign _dirties_cat_T_1 = _GEN_14; // @[Consts.scala:90:49] wire _dirties_cat_T_24; // @[Consts.scala:90:49] assign _dirties_cat_T_24 = _GEN_14; // @[Consts.scala:90:49] wire _state_r_c_cat_T_1; // @[Consts.scala:90:49] assign _state_r_c_cat_T_1 = _GEN_14; // @[Consts.scala:90:49] wire _state_r_c_cat_T_24; // @[Consts.scala:90:49] assign _state_r_c_cat_T_24 = _GEN_14; // @[Consts.scala:90:49] wire _state_T_4; // @[Consts.scala:90:49] assign _state_T_4 = _GEN_14; // @[Consts.scala:90:49] wire _r_c_cat_T_51; // @[Consts.scala:90:49] assign _r_c_cat_T_51 = _GEN_14; // @[Consts.scala:90:49] wire _r_c_cat_T_74; // @[Consts.scala:90:49] assign _r_c_cat_T_74 = _GEN_14; // @[Consts.scala:90:49] wire _state_r_c_cat_T_51; // @[Consts.scala:90:49] assign _state_r_c_cat_T_51 = _GEN_14; // @[Consts.scala:90:49] wire _state_r_c_cat_T_74; // @[Consts.scala:90:49] assign _state_r_c_cat_T_74 = _GEN_14; // @[Consts.scala:90:49] wire _state_T_38; // @[Consts.scala:90:49] assign _state_T_38 = _GEN_14; // @[Consts.scala:90:49] wire _r2_c_cat_T_2 = _r2_c_cat_T | _r2_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _GEN_15 = io_req_uop_mem_cmd_0 == 5'h7; // @[Consts.scala:90:66] wire _r2_c_cat_T_3; // @[Consts.scala:90:66] assign _r2_c_cat_T_3 = _GEN_15; // @[Consts.scala:90:66] wire _r2_c_cat_T_26; // @[Consts.scala:90:66] assign _r2_c_cat_T_26 = _GEN_15; // @[Consts.scala:90:66] wire _needs_second_acq_T_3; // @[Consts.scala:90:66] assign _needs_second_acq_T_3 = _GEN_15; // @[Consts.scala:90:66] wire _dirties_cat_T_3; // @[Consts.scala:90:66] assign _dirties_cat_T_3 = _GEN_15; // @[Consts.scala:90:66] wire _dirties_cat_T_26; // @[Consts.scala:90:66] assign _dirties_cat_T_26 = _GEN_15; // @[Consts.scala:90:66] wire _state_r_c_cat_T_3; // @[Consts.scala:90:66] assign _state_r_c_cat_T_3 = _GEN_15; // @[Consts.scala:90:66] wire _state_r_c_cat_T_26; // @[Consts.scala:90:66] assign _state_r_c_cat_T_26 = _GEN_15; // @[Consts.scala:90:66] wire _state_T_6; // @[Consts.scala:90:66] assign _state_T_6 = _GEN_15; // @[Consts.scala:90:66] wire _r_c_cat_T_53; // @[Consts.scala:90:66] assign _r_c_cat_T_53 = _GEN_15; // @[Consts.scala:90:66] wire _r_c_cat_T_76; // @[Consts.scala:90:66] assign _r_c_cat_T_76 = _GEN_15; // @[Consts.scala:90:66] wire _state_r_c_cat_T_53; // @[Consts.scala:90:66] assign _state_r_c_cat_T_53 = _GEN_15; // @[Consts.scala:90:66] wire _state_r_c_cat_T_76; // @[Consts.scala:90:66] assign _state_r_c_cat_T_76 = _GEN_15; // @[Consts.scala:90:66] wire _state_T_40; // @[Consts.scala:90:66] assign _state_T_40 = _GEN_15; // @[Consts.scala:90:66] wire _r2_c_cat_T_4 = _r2_c_cat_T_2 | _r2_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _GEN_16 = io_req_uop_mem_cmd_0 == 5'h4; // @[package.scala:16:47] wire _r2_c_cat_T_5; // @[package.scala:16:47] assign _r2_c_cat_T_5 = _GEN_16; // @[package.scala:16:47] wire _r2_c_cat_T_28; // @[package.scala:16:47] assign _r2_c_cat_T_28 = _GEN_16; // @[package.scala:16:47] wire _needs_second_acq_T_5; // @[package.scala:16:47] assign _needs_second_acq_T_5 = _GEN_16; // @[package.scala:16:47] wire _dirties_cat_T_5; // @[package.scala:16:47] assign _dirties_cat_T_5 = _GEN_16; // @[package.scala:16:47] wire _dirties_cat_T_28; // @[package.scala:16:47] assign _dirties_cat_T_28 = _GEN_16; // @[package.scala:16:47] wire _state_r_c_cat_T_5; // @[package.scala:16:47] assign _state_r_c_cat_T_5 = _GEN_16; // @[package.scala:16:47] wire _state_r_c_cat_T_28; // @[package.scala:16:47] assign _state_r_c_cat_T_28 = _GEN_16; // @[package.scala:16:47] wire 
_state_T_8; // @[package.scala:16:47] assign _state_T_8 = _GEN_16; // @[package.scala:16:47] wire _r_c_cat_T_55; // @[package.scala:16:47] assign _r_c_cat_T_55 = _GEN_16; // @[package.scala:16:47] wire _r_c_cat_T_78; // @[package.scala:16:47] assign _r_c_cat_T_78 = _GEN_16; // @[package.scala:16:47] wire _state_r_c_cat_T_55; // @[package.scala:16:47] assign _state_r_c_cat_T_55 = _GEN_16; // @[package.scala:16:47] wire _state_r_c_cat_T_78; // @[package.scala:16:47] assign _state_r_c_cat_T_78 = _GEN_16; // @[package.scala:16:47] wire _state_T_42; // @[package.scala:16:47] assign _state_T_42 = _GEN_16; // @[package.scala:16:47] wire _GEN_17 = io_req_uop_mem_cmd_0 == 5'h9; // @[package.scala:16:47] wire _r2_c_cat_T_6; // @[package.scala:16:47] assign _r2_c_cat_T_6 = _GEN_17; // @[package.scala:16:47] wire _r2_c_cat_T_29; // @[package.scala:16:47] assign _r2_c_cat_T_29 = _GEN_17; // @[package.scala:16:47] wire _needs_second_acq_T_6; // @[package.scala:16:47] assign _needs_second_acq_T_6 = _GEN_17; // @[package.scala:16:47] wire _dirties_cat_T_6; // @[package.scala:16:47] assign _dirties_cat_T_6 = _GEN_17; // @[package.scala:16:47] wire _dirties_cat_T_29; // @[package.scala:16:47] assign _dirties_cat_T_29 = _GEN_17; // @[package.scala:16:47] wire _state_r_c_cat_T_6; // @[package.scala:16:47] assign _state_r_c_cat_T_6 = _GEN_17; // @[package.scala:16:47] wire _state_r_c_cat_T_29; // @[package.scala:16:47] assign _state_r_c_cat_T_29 = _GEN_17; // @[package.scala:16:47] wire _state_T_9; // @[package.scala:16:47] assign _state_T_9 = _GEN_17; // @[package.scala:16:47] wire _r_c_cat_T_56; // @[package.scala:16:47] assign _r_c_cat_T_56 = _GEN_17; // @[package.scala:16:47] wire _r_c_cat_T_79; // @[package.scala:16:47] assign _r_c_cat_T_79 = _GEN_17; // @[package.scala:16:47] wire _state_r_c_cat_T_56; // @[package.scala:16:47] assign _state_r_c_cat_T_56 = _GEN_17; // @[package.scala:16:47] wire _state_r_c_cat_T_79; // @[package.scala:16:47] assign _state_r_c_cat_T_79 = _GEN_17; // @[package.scala:16:47] wire _state_T_43; // @[package.scala:16:47] assign _state_T_43 = _GEN_17; // @[package.scala:16:47] wire _GEN_18 = io_req_uop_mem_cmd_0 == 5'hA; // @[package.scala:16:47] wire _r2_c_cat_T_7; // @[package.scala:16:47] assign _r2_c_cat_T_7 = _GEN_18; // @[package.scala:16:47] wire _r2_c_cat_T_30; // @[package.scala:16:47] assign _r2_c_cat_T_30 = _GEN_18; // @[package.scala:16:47] wire _needs_second_acq_T_7; // @[package.scala:16:47] assign _needs_second_acq_T_7 = _GEN_18; // @[package.scala:16:47] wire _dirties_cat_T_7; // @[package.scala:16:47] assign _dirties_cat_T_7 = _GEN_18; // @[package.scala:16:47] wire _dirties_cat_T_30; // @[package.scala:16:47] assign _dirties_cat_T_30 = _GEN_18; // @[package.scala:16:47] wire _state_r_c_cat_T_7; // @[package.scala:16:47] assign _state_r_c_cat_T_7 = _GEN_18; // @[package.scala:16:47] wire _state_r_c_cat_T_30; // @[package.scala:16:47] assign _state_r_c_cat_T_30 = _GEN_18; // @[package.scala:16:47] wire _state_T_10; // @[package.scala:16:47] assign _state_T_10 = _GEN_18; // @[package.scala:16:47] wire _r_c_cat_T_57; // @[package.scala:16:47] assign _r_c_cat_T_57 = _GEN_18; // @[package.scala:16:47] wire _r_c_cat_T_80; // @[package.scala:16:47] assign _r_c_cat_T_80 = _GEN_18; // @[package.scala:16:47] wire _state_r_c_cat_T_57; // @[package.scala:16:47] assign _state_r_c_cat_T_57 = _GEN_18; // @[package.scala:16:47] wire _state_r_c_cat_T_80; // @[package.scala:16:47] assign _state_r_c_cat_T_80 = _GEN_18; // @[package.scala:16:47] wire _state_T_44; // 
@[package.scala:16:47] assign _state_T_44 = _GEN_18; // @[package.scala:16:47] wire _GEN_19 = io_req_uop_mem_cmd_0 == 5'hB; // @[package.scala:16:47] wire _r2_c_cat_T_8; // @[package.scala:16:47] assign _r2_c_cat_T_8 = _GEN_19; // @[package.scala:16:47] wire _r2_c_cat_T_31; // @[package.scala:16:47] assign _r2_c_cat_T_31 = _GEN_19; // @[package.scala:16:47] wire _needs_second_acq_T_8; // @[package.scala:16:47] assign _needs_second_acq_T_8 = _GEN_19; // @[package.scala:16:47] wire _dirties_cat_T_8; // @[package.scala:16:47] assign _dirties_cat_T_8 = _GEN_19; // @[package.scala:16:47] wire _dirties_cat_T_31; // @[package.scala:16:47] assign _dirties_cat_T_31 = _GEN_19; // @[package.scala:16:47] wire _state_r_c_cat_T_8; // @[package.scala:16:47] assign _state_r_c_cat_T_8 = _GEN_19; // @[package.scala:16:47] wire _state_r_c_cat_T_31; // @[package.scala:16:47] assign _state_r_c_cat_T_31 = _GEN_19; // @[package.scala:16:47] wire _state_T_11; // @[package.scala:16:47] assign _state_T_11 = _GEN_19; // @[package.scala:16:47] wire _r_c_cat_T_58; // @[package.scala:16:47] assign _r_c_cat_T_58 = _GEN_19; // @[package.scala:16:47] wire _r_c_cat_T_81; // @[package.scala:16:47] assign _r_c_cat_T_81 = _GEN_19; // @[package.scala:16:47] wire _state_r_c_cat_T_58; // @[package.scala:16:47] assign _state_r_c_cat_T_58 = _GEN_19; // @[package.scala:16:47] wire _state_r_c_cat_T_81; // @[package.scala:16:47] assign _state_r_c_cat_T_81 = _GEN_19; // @[package.scala:16:47] wire _state_T_45; // @[package.scala:16:47] assign _state_T_45 = _GEN_19; // @[package.scala:16:47] wire _r2_c_cat_T_9 = _r2_c_cat_T_5 | _r2_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_10 = _r2_c_cat_T_9 | _r2_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_11 = _r2_c_cat_T_10 | _r2_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _GEN_20 = io_req_uop_mem_cmd_0 == 5'h8; // @[package.scala:16:47] wire _r2_c_cat_T_12; // @[package.scala:16:47] assign _r2_c_cat_T_12 = _GEN_20; // @[package.scala:16:47] wire _r2_c_cat_T_35; // @[package.scala:16:47] assign _r2_c_cat_T_35 = _GEN_20; // @[package.scala:16:47] wire _needs_second_acq_T_12; // @[package.scala:16:47] assign _needs_second_acq_T_12 = _GEN_20; // @[package.scala:16:47] wire _dirties_cat_T_12; // @[package.scala:16:47] assign _dirties_cat_T_12 = _GEN_20; // @[package.scala:16:47] wire _dirties_cat_T_35; // @[package.scala:16:47] assign _dirties_cat_T_35 = _GEN_20; // @[package.scala:16:47] wire _state_r_c_cat_T_12; // @[package.scala:16:47] assign _state_r_c_cat_T_12 = _GEN_20; // @[package.scala:16:47] wire _state_r_c_cat_T_35; // @[package.scala:16:47] assign _state_r_c_cat_T_35 = _GEN_20; // @[package.scala:16:47] wire _state_T_15; // @[package.scala:16:47] assign _state_T_15 = _GEN_20; // @[package.scala:16:47] wire _r_c_cat_T_62; // @[package.scala:16:47] assign _r_c_cat_T_62 = _GEN_20; // @[package.scala:16:47] wire _r_c_cat_T_85; // @[package.scala:16:47] assign _r_c_cat_T_85 = _GEN_20; // @[package.scala:16:47] wire _state_r_c_cat_T_62; // @[package.scala:16:47] assign _state_r_c_cat_T_62 = _GEN_20; // @[package.scala:16:47] wire _state_r_c_cat_T_85; // @[package.scala:16:47] assign _state_r_c_cat_T_85 = _GEN_20; // @[package.scala:16:47] wire _state_T_49; // @[package.scala:16:47] assign _state_T_49 = _GEN_20; // @[package.scala:16:47] wire _GEN_21 = io_req_uop_mem_cmd_0 == 5'hC; // @[package.scala:16:47] wire _r2_c_cat_T_13; // @[package.scala:16:47] assign _r2_c_cat_T_13 = _GEN_21; // @[package.scala:16:47] wire _r2_c_cat_T_36; // 
@[package.scala:16:47] assign _r2_c_cat_T_36 = _GEN_21; // @[package.scala:16:47] wire _needs_second_acq_T_13; // @[package.scala:16:47] assign _needs_second_acq_T_13 = _GEN_21; // @[package.scala:16:47] wire _dirties_cat_T_13; // @[package.scala:16:47] assign _dirties_cat_T_13 = _GEN_21; // @[package.scala:16:47] wire _dirties_cat_T_36; // @[package.scala:16:47] assign _dirties_cat_T_36 = _GEN_21; // @[package.scala:16:47] wire _state_r_c_cat_T_13; // @[package.scala:16:47] assign _state_r_c_cat_T_13 = _GEN_21; // @[package.scala:16:47] wire _state_r_c_cat_T_36; // @[package.scala:16:47] assign _state_r_c_cat_T_36 = _GEN_21; // @[package.scala:16:47] wire _state_T_16; // @[package.scala:16:47] assign _state_T_16 = _GEN_21; // @[package.scala:16:47] wire _r_c_cat_T_63; // @[package.scala:16:47] assign _r_c_cat_T_63 = _GEN_21; // @[package.scala:16:47] wire _r_c_cat_T_86; // @[package.scala:16:47] assign _r_c_cat_T_86 = _GEN_21; // @[package.scala:16:47] wire _state_r_c_cat_T_63; // @[package.scala:16:47] assign _state_r_c_cat_T_63 = _GEN_21; // @[package.scala:16:47] wire _state_r_c_cat_T_86; // @[package.scala:16:47] assign _state_r_c_cat_T_86 = _GEN_21; // @[package.scala:16:47] wire _state_T_50; // @[package.scala:16:47] assign _state_T_50 = _GEN_21; // @[package.scala:16:47] wire _GEN_22 = io_req_uop_mem_cmd_0 == 5'hD; // @[package.scala:16:47] wire _r2_c_cat_T_14; // @[package.scala:16:47] assign _r2_c_cat_T_14 = _GEN_22; // @[package.scala:16:47] wire _r2_c_cat_T_37; // @[package.scala:16:47] assign _r2_c_cat_T_37 = _GEN_22; // @[package.scala:16:47] wire _needs_second_acq_T_14; // @[package.scala:16:47] assign _needs_second_acq_T_14 = _GEN_22; // @[package.scala:16:47] wire _dirties_cat_T_14; // @[package.scala:16:47] assign _dirties_cat_T_14 = _GEN_22; // @[package.scala:16:47] wire _dirties_cat_T_37; // @[package.scala:16:47] assign _dirties_cat_T_37 = _GEN_22; // @[package.scala:16:47] wire _state_r_c_cat_T_14; // @[package.scala:16:47] assign _state_r_c_cat_T_14 = _GEN_22; // @[package.scala:16:47] wire _state_r_c_cat_T_37; // @[package.scala:16:47] assign _state_r_c_cat_T_37 = _GEN_22; // @[package.scala:16:47] wire _state_T_17; // @[package.scala:16:47] assign _state_T_17 = _GEN_22; // @[package.scala:16:47] wire _r_c_cat_T_64; // @[package.scala:16:47] assign _r_c_cat_T_64 = _GEN_22; // @[package.scala:16:47] wire _r_c_cat_T_87; // @[package.scala:16:47] assign _r_c_cat_T_87 = _GEN_22; // @[package.scala:16:47] wire _state_r_c_cat_T_64; // @[package.scala:16:47] assign _state_r_c_cat_T_64 = _GEN_22; // @[package.scala:16:47] wire _state_r_c_cat_T_87; // @[package.scala:16:47] assign _state_r_c_cat_T_87 = _GEN_22; // @[package.scala:16:47] wire _state_T_51; // @[package.scala:16:47] assign _state_T_51 = _GEN_22; // @[package.scala:16:47] wire _GEN_23 = io_req_uop_mem_cmd_0 == 5'hE; // @[package.scala:16:47] wire _r2_c_cat_T_15; // @[package.scala:16:47] assign _r2_c_cat_T_15 = _GEN_23; // @[package.scala:16:47] wire _r2_c_cat_T_38; // @[package.scala:16:47] assign _r2_c_cat_T_38 = _GEN_23; // @[package.scala:16:47] wire _needs_second_acq_T_15; // @[package.scala:16:47] assign _needs_second_acq_T_15 = _GEN_23; // @[package.scala:16:47] wire _dirties_cat_T_15; // @[package.scala:16:47] assign _dirties_cat_T_15 = _GEN_23; // @[package.scala:16:47] wire _dirties_cat_T_38; // @[package.scala:16:47] assign _dirties_cat_T_38 = _GEN_23; // @[package.scala:16:47] wire _state_r_c_cat_T_15; // @[package.scala:16:47] assign _state_r_c_cat_T_15 = _GEN_23; // @[package.scala:16:47] wire 
_state_r_c_cat_T_38; // @[package.scala:16:47] assign _state_r_c_cat_T_38 = _GEN_23; // @[package.scala:16:47] wire _state_T_18; // @[package.scala:16:47] assign _state_T_18 = _GEN_23; // @[package.scala:16:47] wire _r_c_cat_T_65; // @[package.scala:16:47] assign _r_c_cat_T_65 = _GEN_23; // @[package.scala:16:47] wire _r_c_cat_T_88; // @[package.scala:16:47] assign _r_c_cat_T_88 = _GEN_23; // @[package.scala:16:47] wire _state_r_c_cat_T_65; // @[package.scala:16:47] assign _state_r_c_cat_T_65 = _GEN_23; // @[package.scala:16:47] wire _state_r_c_cat_T_88; // @[package.scala:16:47] assign _state_r_c_cat_T_88 = _GEN_23; // @[package.scala:16:47] wire _state_T_52; // @[package.scala:16:47] assign _state_T_52 = _GEN_23; // @[package.scala:16:47] wire _GEN_24 = io_req_uop_mem_cmd_0 == 5'hF; // @[package.scala:16:47] wire _r2_c_cat_T_16; // @[package.scala:16:47] assign _r2_c_cat_T_16 = _GEN_24; // @[package.scala:16:47] wire _r2_c_cat_T_39; // @[package.scala:16:47] assign _r2_c_cat_T_39 = _GEN_24; // @[package.scala:16:47] wire _needs_second_acq_T_16; // @[package.scala:16:47] assign _needs_second_acq_T_16 = _GEN_24; // @[package.scala:16:47] wire _dirties_cat_T_16; // @[package.scala:16:47] assign _dirties_cat_T_16 = _GEN_24; // @[package.scala:16:47] wire _dirties_cat_T_39; // @[package.scala:16:47] assign _dirties_cat_T_39 = _GEN_24; // @[package.scala:16:47] wire _state_r_c_cat_T_16; // @[package.scala:16:47] assign _state_r_c_cat_T_16 = _GEN_24; // @[package.scala:16:47] wire _state_r_c_cat_T_39; // @[package.scala:16:47] assign _state_r_c_cat_T_39 = _GEN_24; // @[package.scala:16:47] wire _state_T_19; // @[package.scala:16:47] assign _state_T_19 = _GEN_24; // @[package.scala:16:47] wire _r_c_cat_T_66; // @[package.scala:16:47] assign _r_c_cat_T_66 = _GEN_24; // @[package.scala:16:47] wire _r_c_cat_T_89; // @[package.scala:16:47] assign _r_c_cat_T_89 = _GEN_24; // @[package.scala:16:47] wire _state_r_c_cat_T_66; // @[package.scala:16:47] assign _state_r_c_cat_T_66 = _GEN_24; // @[package.scala:16:47] wire _state_r_c_cat_T_89; // @[package.scala:16:47] assign _state_r_c_cat_T_89 = _GEN_24; // @[package.scala:16:47] wire _state_T_53; // @[package.scala:16:47] assign _state_T_53 = _GEN_24; // @[package.scala:16:47] wire _r2_c_cat_T_17 = _r2_c_cat_T_12 | _r2_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_18 = _r2_c_cat_T_17 | _r2_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_19 = _r2_c_cat_T_18 | _r2_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_20 = _r2_c_cat_T_19 | _r2_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_21 = _r2_c_cat_T_11 | _r2_c_cat_T_20; // @[package.scala:81:59] wire _r2_c_cat_T_22 = _r2_c_cat_T_4 | _r2_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _r2_c_cat_T_25 = _r2_c_cat_T_23 | _r2_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _r2_c_cat_T_27 = _r2_c_cat_T_25 | _r2_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _r2_c_cat_T_32 = _r2_c_cat_T_28 | _r2_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_33 = _r2_c_cat_T_32 | _r2_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_34 = _r2_c_cat_T_33 | _r2_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_40 = _r2_c_cat_T_35 | _r2_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_41 = _r2_c_cat_T_40 | _r2_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_42 = _r2_c_cat_T_41 | _r2_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _r2_c_cat_T_43 = _r2_c_cat_T_42 | _r2_c_cat_T_39; // 
@[package.scala:16:47, :81:59] wire _r2_c_cat_T_44 = _r2_c_cat_T_34 | _r2_c_cat_T_43; // @[package.scala:81:59] wire _r2_c_cat_T_45 = _r2_c_cat_T_27 | _r2_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _GEN_25 = io_req_uop_mem_cmd_0 == 5'h3; // @[Consts.scala:91:54] wire _r2_c_cat_T_46; // @[Consts.scala:91:54] assign _r2_c_cat_T_46 = _GEN_25; // @[Consts.scala:91:54] wire _needs_second_acq_T_23; // @[Consts.scala:91:54] assign _needs_second_acq_T_23 = _GEN_25; // @[Consts.scala:91:54] wire _dirties_cat_T_46; // @[Consts.scala:91:54] assign _dirties_cat_T_46 = _GEN_25; // @[Consts.scala:91:54] wire _rpq_io_enq_valid_T_4; // @[Consts.scala:88:52] assign _rpq_io_enq_valid_T_4 = _GEN_25; // @[Consts.scala:88:52, :91:54] wire _state_r_c_cat_T_46; // @[Consts.scala:91:54] assign _state_r_c_cat_T_46 = _GEN_25; // @[Consts.scala:91:54] wire _r_c_cat_T_96; // @[Consts.scala:91:54] assign _r_c_cat_T_96 = _GEN_25; // @[Consts.scala:91:54] wire _state_r_c_cat_T_96; // @[Consts.scala:91:54] assign _state_r_c_cat_T_96 = _GEN_25; // @[Consts.scala:91:54] wire _r2_c_cat_T_47 = _r2_c_cat_T_45 | _r2_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _GEN_26 = io_req_uop_mem_cmd_0 == 5'h6; // @[Consts.scala:91:71] wire _r2_c_cat_T_48; // @[Consts.scala:91:71] assign _r2_c_cat_T_48 = _GEN_26; // @[Consts.scala:91:71] wire _needs_second_acq_T_25; // @[Consts.scala:91:71] assign _needs_second_acq_T_25 = _GEN_26; // @[Consts.scala:91:71] wire _dirties_cat_T_48; // @[Consts.scala:91:71] assign _dirties_cat_T_48 = _GEN_26; // @[Consts.scala:91:71] wire _state_r_c_cat_T_48; // @[Consts.scala:91:71] assign _state_r_c_cat_T_48 = _GEN_26; // @[Consts.scala:91:71] wire _r_c_cat_T_98; // @[Consts.scala:91:71] assign _r_c_cat_T_98 = _GEN_26; // @[Consts.scala:91:71] wire _state_r_c_cat_T_98; // @[Consts.scala:91:71] assign _state_r_c_cat_T_98 = _GEN_26; // @[Consts.scala:91:71] wire _r2_c_cat_T_49 = _r2_c_cat_T_47 | _r2_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] r2_c = {_r2_c_cat_T_22, _r2_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _r2_T = {r2_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19] wire _r2_T_25 = _r2_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _r2_T_27 = {1'h0, _r2_T_25}; // @[Misc.scala:35:36, :49:20] wire _r2_T_28 = _r2_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _r2_T_30 = _r2_T_28 ? 2'h2 : _r2_T_27; // @[Misc.scala:35:36, :49:20] wire _r2_T_31 = _r2_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _r2_T_33 = _r2_T_31 ? 2'h1 : _r2_T_30; // @[Misc.scala:35:36, :49:20] wire _r2_T_34 = _r2_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _r2_T_36 = _r2_T_34 ? 2'h2 : _r2_T_33; // @[Misc.scala:35:36, :49:20] wire _r2_T_37 = _r2_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _r2_T_39 = _r2_T_37 ? 2'h0 : _r2_T_36; // @[Misc.scala:35:36, :49:20] wire _r2_T_40 = _r2_T == 4'hE; // @[Misc.scala:49:20] wire _r2_T_41 = _r2_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_42 = _r2_T_40 ? 2'h3 : _r2_T_39; // @[Misc.scala:35:36, :49:20] wire _r2_T_43 = &_r2_T; // @[Misc.scala:49:20] wire _r2_T_44 = _r2_T_43 | _r2_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_45 = _r2_T_43 ? 2'h3 : _r2_T_42; // @[Misc.scala:35:36, :49:20] wire _r2_T_46 = _r2_T == 4'h6; // @[Misc.scala:49:20] wire _r2_T_47 = _r2_T_46 | _r2_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_48 = _r2_T_46 ? 2'h2 : _r2_T_45; // @[Misc.scala:35:36, :49:20] wire _r2_T_49 = _r2_T == 4'h7; // @[Misc.scala:49:20] wire _r2_T_50 = _r2_T_49 | _r2_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_51 = _r2_T_49 ? 
2'h3 : _r2_T_48; // @[Misc.scala:35:36, :49:20] wire _r2_T_52 = _r2_T == 4'h1; // @[Misc.scala:49:20] wire _r2_T_53 = _r2_T_52 | _r2_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_54 = _r2_T_52 ? 2'h1 : _r2_T_51; // @[Misc.scala:35:36, :49:20] wire _r2_T_55 = _r2_T == 4'h2; // @[Misc.scala:49:20] wire _r2_T_56 = _r2_T_55 | _r2_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _r2_T_57 = _r2_T_55 ? 2'h2 : _r2_T_54; // @[Misc.scala:35:36, :49:20] wire _r2_T_58 = _r2_T == 4'h3; // @[Misc.scala:49:20] wire r2_1 = _r2_T_58 | _r2_T_56; // @[Misc.scala:35:9, :49:20] wire [1:0] r2_2 = _r2_T_58 ? 2'h3 : _r2_T_57; // @[Misc.scala:35:36, :49:20] wire _needs_second_acq_T_2 = _needs_second_acq_T | _needs_second_acq_T_1; // @[Consts.scala:90:{32,42,49}] wire _needs_second_acq_T_4 = _needs_second_acq_T_2 | _needs_second_acq_T_3; // @[Consts.scala:90:{42,59,66}] wire _needs_second_acq_T_9 = _needs_second_acq_T_5 | _needs_second_acq_T_6; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_10 = _needs_second_acq_T_9 | _needs_second_acq_T_7; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_11 = _needs_second_acq_T_10 | _needs_second_acq_T_8; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_17 = _needs_second_acq_T_12 | _needs_second_acq_T_13; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_18 = _needs_second_acq_T_17 | _needs_second_acq_T_14; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_19 = _needs_second_acq_T_18 | _needs_second_acq_T_15; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_20 = _needs_second_acq_T_19 | _needs_second_acq_T_16; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_21 = _needs_second_acq_T_11 | _needs_second_acq_T_20; // @[package.scala:81:59] wire _needs_second_acq_T_22 = _needs_second_acq_T_4 | _needs_second_acq_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _needs_second_acq_T_24 = _needs_second_acq_T_22 | _needs_second_acq_T_23; // @[Consts.scala:90:76, :91:{47,54}] wire _needs_second_acq_T_26 = _needs_second_acq_T_24 | _needs_second_acq_T_25; // @[Consts.scala:91:{47,64,71}] wire _needs_second_acq_T_29 = _needs_second_acq_T_27 | _needs_second_acq_T_28; // @[Consts.scala:90:{32,42,49}] wire _needs_second_acq_T_31 = _needs_second_acq_T_29 | _needs_second_acq_T_30; // @[Consts.scala:90:{42,59,66}] wire _needs_second_acq_T_36 = _needs_second_acq_T_32 | _needs_second_acq_T_33; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_37 = _needs_second_acq_T_36 | _needs_second_acq_T_34; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_38 = _needs_second_acq_T_37 | _needs_second_acq_T_35; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_44 = _needs_second_acq_T_39 | _needs_second_acq_T_40; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_45 = _needs_second_acq_T_44 | _needs_second_acq_T_41; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_46 = _needs_second_acq_T_45 | _needs_second_acq_T_42; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_47 = _needs_second_acq_T_46 | _needs_second_acq_T_43; // @[package.scala:16:47, :81:59] wire _needs_second_acq_T_48 = _needs_second_acq_T_38 | _needs_second_acq_T_47; // @[package.scala:81:59] wire _needs_second_acq_T_49 = _needs_second_acq_T_31 | _needs_second_acq_T_48; // @[Consts.scala:87:44, :90:{59,76}] wire _needs_second_acq_T_51 = _needs_second_acq_T_49 | _needs_second_acq_T_50; // @[Consts.scala:90:76, :91:{47,54}] wire _needs_second_acq_T_53 = _needs_second_acq_T_51 | _needs_second_acq_T_52; // 
@[Consts.scala:91:{47,64,71}] wire _needs_second_acq_T_54 = ~_needs_second_acq_T_53; // @[Metadata.scala:104:57] wire cmd_requires_second_acquire = _needs_second_acq_T_26 & _needs_second_acq_T_54; // @[Metadata.scala:104:{54,57}] wire is_hit_again = r1_1 & r2_1; // @[Misc.scala:35:9] wire _dirties_cat_T_2 = _dirties_cat_T | _dirties_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _dirties_cat_T_4 = _dirties_cat_T_2 | _dirties_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _dirties_cat_T_9 = _dirties_cat_T_5 | _dirties_cat_T_6; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_10 = _dirties_cat_T_9 | _dirties_cat_T_7; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_11 = _dirties_cat_T_10 | _dirties_cat_T_8; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_17 = _dirties_cat_T_12 | _dirties_cat_T_13; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_18 = _dirties_cat_T_17 | _dirties_cat_T_14; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_19 = _dirties_cat_T_18 | _dirties_cat_T_15; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_20 = _dirties_cat_T_19 | _dirties_cat_T_16; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_21 = _dirties_cat_T_11 | _dirties_cat_T_20; // @[package.scala:81:59] wire _dirties_cat_T_22 = _dirties_cat_T_4 | _dirties_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _dirties_cat_T_25 = _dirties_cat_T_23 | _dirties_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _dirties_cat_T_27 = _dirties_cat_T_25 | _dirties_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _dirties_cat_T_32 = _dirties_cat_T_28 | _dirties_cat_T_29; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_33 = _dirties_cat_T_32 | _dirties_cat_T_30; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_34 = _dirties_cat_T_33 | _dirties_cat_T_31; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_40 = _dirties_cat_T_35 | _dirties_cat_T_36; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_41 = _dirties_cat_T_40 | _dirties_cat_T_37; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_42 = _dirties_cat_T_41 | _dirties_cat_T_38; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_43 = _dirties_cat_T_42 | _dirties_cat_T_39; // @[package.scala:16:47, :81:59] wire _dirties_cat_T_44 = _dirties_cat_T_34 | _dirties_cat_T_43; // @[package.scala:81:59] wire _dirties_cat_T_45 = _dirties_cat_T_27 | _dirties_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _dirties_cat_T_47 = _dirties_cat_T_45 | _dirties_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _dirties_cat_T_49 = _dirties_cat_T_47 | _dirties_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] dirties_cat = {_dirties_cat_T_22, _dirties_cat_T_49}; // @[Metadata.scala:29:18] wire dirties = &dirties_cat; // @[Metadata.scala:29:18, :106:42] wire [1:0] biggest_grow_param = dirties ? r2_2 : r1_2; // @[Misc.scala:35:36] wire [1:0] dirtier_coh_state = biggest_grow_param; // @[Metadata.scala:107:33, :160:20] wire [4:0] dirtier_cmd = dirties ? 
io_req_uop_mem_cmd_0 : req_uop_mem_cmd; // @[Metadata.scala:106:42, :109:27] wire _T_16 = io_mem_grant_ready_0 & io_mem_grant_valid_0; // @[Decoupled.scala:51:35] wire [26:0] _r_beats1_decode_T = 27'hFFF << io_mem_grant_bits_size_0; // @[package.scala:243:71] wire [11:0] _r_beats1_decode_T_1 = _r_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _r_beats1_decode_T_2 = ~_r_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] r_beats1_decode = _r_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire r_beats1_opdata = io_mem_grant_bits_opcode_0[0]; // @[Edges.scala:106:36] wire opdata = io_mem_grant_bits_opcode_0[0]; // @[Edges.scala:106:36] wire grant_had_data_opdata = io_mem_grant_bits_opcode_0[0]; // @[Edges.scala:106:36] wire [8:0] r_beats1 = r_beats1_opdata ? r_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] r_counter; // @[Edges.scala:229:27] wire [9:0] _r_counter1_T = {1'h0, r_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] r_counter1 = _r_counter1_T[8:0]; // @[Edges.scala:230:28] wire r_1_1 = r_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _r_last_T = r_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _r_last_T_1 = r_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire r_2 = _r_last_T | _r_last_T_1; // @[Edges.scala:232:{25,33,43}] wire refill_done = r_2 & _T_16; // @[Decoupled.scala:51:35] wire [8:0] _r_count_T = ~r_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] r_4 = r_beats1 & _r_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _r_counter_T = r_1_1 ? r_beats1 : r_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [11:0] refill_address_inc = {r_4, 3'h0}; // @[Edges.scala:234:25, :269:29] wire _sec_rdy_T = ~cmd_requires_second_acquire; // @[Metadata.scala:104:54] wire _sec_rdy_T_1 = ~io_req_is_probe_0; // @[mshrs.scala:36:7, :125:50] wire _sec_rdy_T_2 = _sec_rdy_T & _sec_rdy_T_1; // @[mshrs.scala:125:{18,47,50}] wire _sec_rdy_T_3 = ~(|state); // @[package.scala:16:47] wire _sec_rdy_T_4 = state == 5'hD; // @[package.scala:16:47] wire _sec_rdy_T_5 = state == 5'hE; // @[package.scala:16:47] wire _sec_rdy_T_6 = state == 5'hF; // @[package.scala:16:47] wire _sec_rdy_T_7 = _sec_rdy_T_3 | _sec_rdy_T_4; // @[package.scala:16:47, :81:59] wire _sec_rdy_T_8 = _sec_rdy_T_7 | _sec_rdy_T_5; // @[package.scala:16:47, :81:59] wire _sec_rdy_T_9 = _sec_rdy_T_8 | _sec_rdy_T_6; // @[package.scala:16:47, :81:59] wire _sec_rdy_T_10 = ~_sec_rdy_T_9; // @[package.scala:81:59] wire sec_rdy = _sec_rdy_T_2 & _sec_rdy_T_10; // @[mshrs.scala:125:{47,67}, :126:18] wire _rpq_io_enq_valid_T = io_req_pri_val_0 & io_req_pri_rdy_0; // @[mshrs.scala:36:7, :133:40] wire _rpq_io_enq_valid_T_1 = io_req_sec_val_0 & io_req_sec_rdy_0; // @[mshrs.scala:36:7, :133:78] wire _rpq_io_enq_valid_T_2 = _rpq_io_enq_valid_T | _rpq_io_enq_valid_T_1; // @[mshrs.scala:133:{40,59,78}] wire _rpq_io_enq_valid_T_3 = io_req_uop_mem_cmd_0 == 5'h2; // @[Consts.scala:88:35] wire _rpq_io_enq_valid_T_5 = _rpq_io_enq_valid_T_3 | _rpq_io_enq_valid_T_4; // @[Consts.scala:88:{35,45,52}] wire _rpq_io_enq_valid_T_6 = ~_rpq_io_enq_valid_T_5; // @[Consts.scala:88:45] wire _rpq_io_enq_valid_T_7 = _rpq_io_enq_valid_T_2 & _rpq_io_enq_valid_T_6; // @[mshrs.scala:133:{59,98,101}] reg grantack_valid; // @[mshrs.scala:138:21] reg [2:0] grantack_bits_sink; // @[mshrs.scala:138:21] assign io_mem_finish_bits_sink_0 = grantack_bits_sink; // @[mshrs.scala:36:7, :138:21] reg [2:0] refill_ctr; // @[mshrs.scala:139:24] reg commit_line; // 
@[mshrs.scala:140:24] reg grant_had_data; // @[mshrs.scala:141:27] reg finish_to_prefetch; // @[mshrs.scala:142:31] reg [1:0] meta_hazard; // @[mshrs.scala:145:28] wire [2:0] _meta_hazard_T = {1'h0, meta_hazard} + 3'h1; // @[mshrs.scala:145:28, :146:59] wire [1:0] _meta_hazard_T_1 = _meta_hazard_T[1:0]; // @[mshrs.scala:146:59] wire _io_probe_rdy_T = meta_hazard == 2'h0; // @[mshrs.scala:145:28, :148:34] wire _io_probe_rdy_T_1 = ~(|state); // @[package.scala:16:47] wire _io_probe_rdy_T_2 = state == 5'h1; // @[package.scala:16:47] wire _io_probe_rdy_T_3 = state == 5'h2; // @[package.scala:16:47] wire _io_probe_rdy_T_4 = state == 5'h3; // @[package.scala:16:47] wire _io_probe_rdy_T_5 = _io_probe_rdy_T_1 | _io_probe_rdy_T_2; // @[package.scala:16:47, :81:59] wire _io_probe_rdy_T_6 = _io_probe_rdy_T_5 | _io_probe_rdy_T_3; // @[package.scala:16:47, :81:59] wire _io_probe_rdy_T_7 = _io_probe_rdy_T_6 | _io_probe_rdy_T_4; // @[package.scala:16:47, :81:59] wire _io_probe_rdy_T_8 = state == 5'h4; // @[mshrs.scala:107:22, :148:129] wire _io_probe_rdy_T_9 = _io_probe_rdy_T_8 & grantack_valid; // @[mshrs.scala:138:21, :148:{129,145}] wire _io_probe_rdy_T_10 = _io_probe_rdy_T_7 | _io_probe_rdy_T_9; // @[package.scala:81:59] assign _io_probe_rdy_T_11 = _io_probe_rdy_T & _io_probe_rdy_T_10; // @[mshrs.scala:148:{34,42,119}] assign io_probe_rdy_0 = _io_probe_rdy_T_11; // @[mshrs.scala:36:7, :148:42] assign _io_idx_valid_T = |state; // @[package.scala:16:47] assign io_idx_valid_0 = _io_idx_valid_T; // @[mshrs.scala:36:7, :149:25] assign _io_tag_valid_T = |state; // @[package.scala:16:47] assign io_tag_valid_0 = _io_tag_valid_T; // @[mshrs.scala:36:7, :150:25] wire _io_way_valid_T = ~(|state); // @[package.scala:16:47] wire _io_way_valid_T_1 = state == 5'h11; // @[package.scala:16:47] wire _io_way_valid_T_2 = _io_way_valid_T | _io_way_valid_T_1; // @[package.scala:16:47, :81:59] assign _io_way_valid_T_3 = ~_io_way_valid_T_2; // @[package.scala:81:59] assign io_way_valid_0 = _io_way_valid_T_3; // @[mshrs.scala:36:7, :151:19] assign _io_req_sec_rdy_T = sec_rdy & _rpq_io_enq_ready; // @[mshrs.scala:125:67, :128:19, :159:37] assign io_req_sec_rdy_0 = _io_req_sec_rdy_T; // @[mshrs.scala:36:7, :159:37] wire [4:0] state_new_state; // @[mshrs.scala:191:29] wire _state_T_1 = ~_state_T; // @[mshrs.scala:194:11] wire _state_T_2 = ~_rpq_io_enq_ready; // @[mshrs.scala:128:19, :194:11] wire [3:0] _GEN_27 = {2'h2, io_req_old_meta_coh_state_0}; // @[Metadata.scala:120:19] wire [3:0] _state_req_needs_wb_r_T_6; // @[Metadata.scala:120:19] assign _state_req_needs_wb_r_T_6 = _GEN_27; // @[Metadata.scala:120:19] wire [3:0] _state_req_needs_wb_r_T_70; // @[Metadata.scala:120:19] assign _state_req_needs_wb_r_T_70 = _GEN_27; // @[Metadata.scala:120:19] wire _state_req_needs_wb_r_T_19 = _state_req_needs_wb_r_T_6 == 4'h8; // @[Misc.scala:56:20] wire [2:0] _state_req_needs_wb_r_T_21 = _state_req_needs_wb_r_T_19 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_23 = _state_req_needs_wb_r_T_6 == 4'h9; // @[Misc.scala:56:20] wire [2:0] _state_req_needs_wb_r_T_25 = _state_req_needs_wb_r_T_23 ? 3'h2 : _state_req_needs_wb_r_T_21; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_27 = _state_req_needs_wb_r_T_6 == 4'hA; // @[Misc.scala:56:20] wire [2:0] _state_req_needs_wb_r_T_29 = _state_req_needs_wb_r_T_27 ? 
3'h1 : _state_req_needs_wb_r_T_25; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_31 = _state_req_needs_wb_r_T_6 == 4'hB; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_32 = _state_req_needs_wb_r_T_31; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_33 = _state_req_needs_wb_r_T_31 ? 3'h1 : _state_req_needs_wb_r_T_29; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_35 = _state_req_needs_wb_r_T_6 == 4'h4; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_36 = ~_state_req_needs_wb_r_T_35 & _state_req_needs_wb_r_T_32; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_37 = _state_req_needs_wb_r_T_35 ? 3'h5 : _state_req_needs_wb_r_T_33; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_39 = _state_req_needs_wb_r_T_6 == 4'h5; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_40 = ~_state_req_needs_wb_r_T_39 & _state_req_needs_wb_r_T_36; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_41 = _state_req_needs_wb_r_T_39 ? 3'h4 : _state_req_needs_wb_r_T_37; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_42 = {1'h0, _state_req_needs_wb_r_T_39}; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_43 = _state_req_needs_wb_r_T_6 == 4'h6; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_44 = ~_state_req_needs_wb_r_T_43 & _state_req_needs_wb_r_T_40; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_45 = _state_req_needs_wb_r_T_43 ? 3'h0 : _state_req_needs_wb_r_T_41; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_46 = _state_req_needs_wb_r_T_43 ? 2'h1 : _state_req_needs_wb_r_T_42; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_47 = _state_req_needs_wb_r_T_6 == 4'h7; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_48 = _state_req_needs_wb_r_T_47 | _state_req_needs_wb_r_T_44; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_49 = _state_req_needs_wb_r_T_47 ? 3'h0 : _state_req_needs_wb_r_T_45; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_50 = _state_req_needs_wb_r_T_47 ? 2'h1 : _state_req_needs_wb_r_T_46; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_51 = _state_req_needs_wb_r_T_6 == 4'h0; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_52 = ~_state_req_needs_wb_r_T_51 & _state_req_needs_wb_r_T_48; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_53 = _state_req_needs_wb_r_T_51 ? 3'h5 : _state_req_needs_wb_r_T_49; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_54 = _state_req_needs_wb_r_T_51 ? 2'h0 : _state_req_needs_wb_r_T_50; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_55 = _state_req_needs_wb_r_T_6 == 4'h1; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_56 = ~_state_req_needs_wb_r_T_55 & _state_req_needs_wb_r_T_52; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_57 = _state_req_needs_wb_r_T_55 ? 3'h4 : _state_req_needs_wb_r_T_53; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_58 = _state_req_needs_wb_r_T_55 ? 2'h1 : _state_req_needs_wb_r_T_54; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_59 = _state_req_needs_wb_r_T_6 == 4'h2; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_60 = ~_state_req_needs_wb_r_T_59 & _state_req_needs_wb_r_T_56; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_61 = _state_req_needs_wb_r_T_59 ? 3'h3 : _state_req_needs_wb_r_T_57; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_62 = _state_req_needs_wb_r_T_59 ? 
2'h2 : _state_req_needs_wb_r_T_58; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_63 = _state_req_needs_wb_r_T_6 == 4'h3; // @[Misc.scala:56:20] wire state_req_needs_wb_r_1 = _state_req_needs_wb_r_T_63 | _state_req_needs_wb_r_T_60; // @[Misc.scala:38:9, :56:20] wire [2:0] state_req_needs_wb_r_2 = _state_req_needs_wb_r_T_63 ? 3'h3 : _state_req_needs_wb_r_T_61; // @[Misc.scala:38:36, :56:20] wire [1:0] state_req_needs_wb_r_3 = _state_req_needs_wb_r_T_63 ? 2'h2 : _state_req_needs_wb_r_T_62; // @[Misc.scala:38:63, :56:20] wire [1:0] state_req_needs_wb_meta_state = state_req_needs_wb_r_3; // @[Misc.scala:38:63] wire _state_r_c_cat_T_2 = _state_r_c_cat_T | _state_r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _state_r_c_cat_T_4 = _state_r_c_cat_T_2 | _state_r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _state_r_c_cat_T_9 = _state_r_c_cat_T_5 | _state_r_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_10 = _state_r_c_cat_T_9 | _state_r_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_11 = _state_r_c_cat_T_10 | _state_r_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_17 = _state_r_c_cat_T_12 | _state_r_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_18 = _state_r_c_cat_T_17 | _state_r_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_19 = _state_r_c_cat_T_18 | _state_r_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_20 = _state_r_c_cat_T_19 | _state_r_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_21 = _state_r_c_cat_T_11 | _state_r_c_cat_T_20; // @[package.scala:81:59] wire _state_r_c_cat_T_22 = _state_r_c_cat_T_4 | _state_r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _state_r_c_cat_T_25 = _state_r_c_cat_T_23 | _state_r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _state_r_c_cat_T_27 = _state_r_c_cat_T_25 | _state_r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _state_r_c_cat_T_32 = _state_r_c_cat_T_28 | _state_r_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_33 = _state_r_c_cat_T_32 | _state_r_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_34 = _state_r_c_cat_T_33 | _state_r_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_40 = _state_r_c_cat_T_35 | _state_r_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_41 = _state_r_c_cat_T_40 | _state_r_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_42 = _state_r_c_cat_T_41 | _state_r_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_43 = _state_r_c_cat_T_42 | _state_r_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_44 = _state_r_c_cat_T_34 | _state_r_c_cat_T_43; // @[package.scala:81:59] wire _state_r_c_cat_T_45 = _state_r_c_cat_T_27 | _state_r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _state_r_c_cat_T_47 = _state_r_c_cat_T_45 | _state_r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _state_r_c_cat_T_49 = _state_r_c_cat_T_47 | _state_r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] state_r_c = {_state_r_c_cat_T_22, _state_r_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _state_r_T = {state_r_c, io_req_old_meta_coh_state_0}; // @[Metadata.scala:29:18, :58:19] wire _state_r_T_25 = _state_r_T == 4'hC; // @[Misc.scala:49:20] wire [1:0] _state_r_T_27 = {1'h0, _state_r_T_25}; // @[Misc.scala:35:36, :49:20] wire _state_r_T_28 = _state_r_T == 4'hD; // @[Misc.scala:49:20] wire [1:0] _state_r_T_30 = _state_r_T_28 ? 
2'h2 : _state_r_T_27; // @[Misc.scala:35:36, :49:20] wire _state_r_T_31 = _state_r_T == 4'h4; // @[Misc.scala:49:20] wire [1:0] _state_r_T_33 = _state_r_T_31 ? 2'h1 : _state_r_T_30; // @[Misc.scala:35:36, :49:20] wire _state_r_T_34 = _state_r_T == 4'h5; // @[Misc.scala:49:20] wire [1:0] _state_r_T_36 = _state_r_T_34 ? 2'h2 : _state_r_T_33; // @[Misc.scala:35:36, :49:20] wire _state_r_T_37 = _state_r_T == 4'h0; // @[Misc.scala:49:20] wire [1:0] _state_r_T_39 = _state_r_T_37 ? 2'h0 : _state_r_T_36; // @[Misc.scala:35:36, :49:20] wire _state_r_T_40 = _state_r_T == 4'hE; // @[Misc.scala:49:20] wire _state_r_T_41 = _state_r_T_40; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_42 = _state_r_T_40 ? 2'h3 : _state_r_T_39; // @[Misc.scala:35:36, :49:20] wire _state_r_T_43 = &_state_r_T; // @[Misc.scala:49:20] wire _state_r_T_44 = _state_r_T_43 | _state_r_T_41; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_45 = _state_r_T_43 ? 2'h3 : _state_r_T_42; // @[Misc.scala:35:36, :49:20] wire _state_r_T_46 = _state_r_T == 4'h6; // @[Misc.scala:49:20] wire _state_r_T_47 = _state_r_T_46 | _state_r_T_44; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_48 = _state_r_T_46 ? 2'h2 : _state_r_T_45; // @[Misc.scala:35:36, :49:20] wire _state_r_T_49 = _state_r_T == 4'h7; // @[Misc.scala:49:20] wire _state_r_T_50 = _state_r_T_49 | _state_r_T_47; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_51 = _state_r_T_49 ? 2'h3 : _state_r_T_48; // @[Misc.scala:35:36, :49:20] wire _state_r_T_52 = _state_r_T == 4'h1; // @[Misc.scala:49:20] wire _state_r_T_53 = _state_r_T_52 | _state_r_T_50; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_54 = _state_r_T_52 ? 2'h1 : _state_r_T_51; // @[Misc.scala:35:36, :49:20] wire _state_r_T_55 = _state_r_T == 4'h2; // @[Misc.scala:49:20] wire _state_r_T_56 = _state_r_T_55 | _state_r_T_53; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_57 = _state_r_T_55 ? 2'h2 : _state_r_T_54; // @[Misc.scala:35:36, :49:20] wire _state_r_T_58 = _state_r_T == 4'h3; // @[Misc.scala:49:20] wire state_is_hit = _state_r_T_58 | _state_r_T_56; // @[Misc.scala:35:9, :49:20] wire [1:0] state_r_2 = _state_r_T_58 ? 2'h3 : _state_r_T_57; // @[Misc.scala:35:36, :49:20] wire [1:0] state_coh_on_hit_state = state_r_2; // @[Misc.scala:35:36] wire _state_T_5 = _state_T_3 | _state_T_4; // @[Consts.scala:90:{32,42,49}] wire _state_T_7 = _state_T_5 | _state_T_6; // @[Consts.scala:90:{42,59,66}] wire _state_T_12 = _state_T_8 | _state_T_9; // @[package.scala:16:47, :81:59] wire _state_T_13 = _state_T_12 | _state_T_10; // @[package.scala:16:47, :81:59] wire _state_T_14 = _state_T_13 | _state_T_11; // @[package.scala:16:47, :81:59] wire _state_T_20 = _state_T_15 | _state_T_16; // @[package.scala:16:47, :81:59] wire _state_T_21 = _state_T_20 | _state_T_17; // @[package.scala:16:47, :81:59] wire _state_T_22 = _state_T_21 | _state_T_18; // @[package.scala:16:47, :81:59] wire _state_T_23 = _state_T_22 | _state_T_19; // @[package.scala:16:47, :81:59] wire _state_T_24 = _state_T_14 | _state_T_23; // @[package.scala:81:59] wire _state_T_25 = _state_T_7 | _state_T_24; // @[Consts.scala:87:44, :90:{59,76}] wire _state_T_27 = ~_state_T_26; // @[mshrs.scala:201:15] wire _state_T_28 = ~_state_T_25; // @[Consts.scala:90:76] assign state_new_state = io_req_tag_match_0 & state_is_hit ? 
5'hC : 5'h1; // @[Misc.scala:35:9] assign io_mem_acquire_valid_0 = (|state) & _io_probe_rdy_T_2; // @[package.scala:16:47] wire [27:0] _GEN_28 = {req_tag, req_idx}; // @[mshrs.scala:110:25, :111:26, :227:28] wire [27:0] _io_mem_acquire_bits_T; // @[mshrs.scala:227:28] assign _io_mem_acquire_bits_T = _GEN_28; // @[mshrs.scala:227:28] wire [27:0] rp_addr_hi; // @[mshrs.scala:261:22] assign rp_addr_hi = _GEN_28; // @[mshrs.scala:227:28, :261:22] wire [27:0] hi; // @[mshrs.scala:266:10] assign hi = _GEN_28; // @[mshrs.scala:227:28, :266:10] wire [27:0] io_replay_bits_addr_hi; // @[mshrs.scala:353:31] assign io_replay_bits_addr_hi = _GEN_28; // @[mshrs.scala:227:28, :353:31] wire [33:0] _io_mem_acquire_bits_T_1 = {_io_mem_acquire_bits_T, 6'h0}; // @[mshrs.scala:227:{28,47}] wire [33:0] _io_mem_acquire_bits_legal_T_1 = _io_mem_acquire_bits_T_1; // @[Parameters.scala:137:31] wire [34:0] _io_mem_acquire_bits_legal_T_2 = {1'h0, _io_mem_acquire_bits_legal_T_1}; // @[Parameters.scala:137:{31,41}] wire [34:0] _io_mem_acquire_bits_legal_T_3 = _io_mem_acquire_bits_legal_T_2 & 35'h80000000; // @[Parameters.scala:137:{41,46}] wire [34:0] _io_mem_acquire_bits_legal_T_4 = _io_mem_acquire_bits_legal_T_3; // @[Parameters.scala:137:46] wire _io_mem_acquire_bits_legal_T_5 = _io_mem_acquire_bits_legal_T_4 == 35'h0; // @[Parameters.scala:137:{46,59}] assign io_mem_acquire_bits_a_address = _io_mem_acquire_bits_T_1[31:0]; // @[Edges.scala:346:17] wire [33:0] _io_mem_acquire_bits_legal_T_9 = {_io_mem_acquire_bits_T_1[33:32], io_mem_acquire_bits_a_address ^ 32'h80000000}; // @[Edges.scala:346:17] wire [34:0] _io_mem_acquire_bits_legal_T_10 = {1'h0, _io_mem_acquire_bits_legal_T_9}; // @[Parameters.scala:137:{31,41}] wire [34:0] _io_mem_acquire_bits_legal_T_11 = _io_mem_acquire_bits_legal_T_10 & 35'h80000000; // @[Parameters.scala:137:{41,46}] wire [34:0] _io_mem_acquire_bits_legal_T_12 = _io_mem_acquire_bits_legal_T_11; // @[Parameters.scala:137:46] wire _io_mem_acquire_bits_legal_T_13 = _io_mem_acquire_bits_legal_T_12 == 35'h0; // @[Parameters.scala:137:{46,59}] wire _io_mem_acquire_bits_legal_T_14 = _io_mem_acquire_bits_legal_T_13; // @[Parameters.scala:684:54] wire io_mem_acquire_bits_legal = _io_mem_acquire_bits_legal_T_14; // @[Parameters.scala:684:54, :686:26] assign io_mem_acquire_bits_param_0 = io_mem_acquire_bits_a_param; // @[Edges.scala:346:17] assign io_mem_acquire_bits_address_0 = io_mem_acquire_bits_a_address; // @[Edges.scala:346:17] assign io_mem_acquire_bits_a_param = {1'h0, grow_param}; // @[Misc.scala:35:36] wire io_mem_acquire_bits_a_mask_sub_sub_bit = _io_mem_acquire_bits_T_1[2]; // @[Misc.scala:210:26] wire io_mem_acquire_bits_a_mask_sub_sub_1_2 = io_mem_acquire_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire io_mem_acquire_bits_a_mask_sub_sub_nbit = ~io_mem_acquire_bits_a_mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire io_mem_acquire_bits_a_mask_sub_sub_0_2 = io_mem_acquire_bits_a_mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_sub_sub_acc_T = io_mem_acquire_bits_a_mask_sub_sub_0_2; // @[Misc.scala:214:27, :215:38] wire _io_mem_acquire_bits_a_mask_sub_sub_acc_T_1 = io_mem_acquire_bits_a_mask_sub_sub_1_2; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_sub_bit = _io_mem_acquire_bits_T_1[1]; // @[Misc.scala:210:26] wire io_mem_acquire_bits_a_mask_sub_nbit = ~io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire io_mem_acquire_bits_a_mask_sub_0_2 = io_mem_acquire_bits_a_mask_sub_sub_0_2 & 
io_mem_acquire_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire io_mem_acquire_bits_a_mask_sub_1_2 = io_mem_acquire_bits_a_mask_sub_sub_0_2 & io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire io_mem_acquire_bits_a_mask_sub_2_2 = io_mem_acquire_bits_a_mask_sub_sub_1_2 & io_mem_acquire_bits_a_mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire io_mem_acquire_bits_a_mask_sub_3_2 = io_mem_acquire_bits_a_mask_sub_sub_1_2 & io_mem_acquire_bits_a_mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire io_mem_acquire_bits_a_mask_bit = _io_mem_acquire_bits_T_1[0]; // @[Misc.scala:210:26] wire io_mem_acquire_bits_a_mask_nbit = ~io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :211:20] wire io_mem_acquire_bits_a_mask_eq = io_mem_acquire_bits_a_mask_sub_0_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T = io_mem_acquire_bits_a_mask_eq; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_1 = io_mem_acquire_bits_a_mask_sub_0_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_1 = io_mem_acquire_bits_a_mask_eq_1; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_2 = io_mem_acquire_bits_a_mask_sub_1_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_2 = io_mem_acquire_bits_a_mask_eq_2; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_3 = io_mem_acquire_bits_a_mask_sub_1_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_3 = io_mem_acquire_bits_a_mask_eq_3; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_4 = io_mem_acquire_bits_a_mask_sub_2_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_4 = io_mem_acquire_bits_a_mask_eq_4; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_5 = io_mem_acquire_bits_a_mask_sub_2_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_5 = io_mem_acquire_bits_a_mask_eq_5; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_6 = io_mem_acquire_bits_a_mask_sub_3_2 & io_mem_acquire_bits_a_mask_nbit; // @[Misc.scala:211:20, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_6 = io_mem_acquire_bits_a_mask_eq_6; // @[Misc.scala:214:27, :215:38] wire io_mem_acquire_bits_a_mask_eq_7 = io_mem_acquire_bits_a_mask_sub_3_2 & io_mem_acquire_bits_a_mask_bit; // @[Misc.scala:210:26, :214:27] wire _io_mem_acquire_bits_a_mask_acc_T_7 = io_mem_acquire_bits_a_mask_eq_7; // @[Misc.scala:214:27, :215:38] wire _GEN_29 = ~(|state) | _io_probe_rdy_T_2; // @[package.scala:16:47] assign io_lb_write_valid_0 = ~_GEN_29 & _io_probe_rdy_T_3 & opdata & io_mem_grant_valid_0; // @[package.scala:16:47] wire [8:0] _io_lb_write_bits_offset_T = refill_address_inc[11:3]; // @[Edges.scala:269:29] assign io_lb_write_bits_offset_0 = _io_lb_write_bits_offset_T[2:0]; // @[mshrs.scala:36:7, :238:{31,53}] assign io_mem_grant_ready_0 = ~_GEN_29 & _io_probe_rdy_T_3 & (~opdata | io_lb_write_ready_0); // @[package.scala:16:47] wire _grantack_valid_T = io_mem_grant_bits_opcode_0[2]; // @[Edges.scala:71:36] wire _grantack_valid_T_1 = io_mem_grant_bits_opcode_0[1]; // @[Edges.scala:71:52] wire _grantack_valid_T_2 = ~_grantack_valid_T_1; // @[Edges.scala:71:{43,52}] wire _grantack_valid_T_3 = _grantack_valid_T & _grantack_valid_T_2; // 
@[Edges.scala:71:{36,40,43}] wire [4:0] _state_T_29 = grant_had_data ? 5'h3 : 5'hC; // @[mshrs.scala:141:27, :250:19] wire _drain_load_T = _rpq_io_deq_bits_uop_mem_cmd == 5'h0; // @[package.scala:16:47] wire _drain_load_T_1 = _rpq_io_deq_bits_uop_mem_cmd == 5'h10; // @[package.scala:16:47] wire _GEN_30 = _rpq_io_deq_bits_uop_mem_cmd == 5'h6; // @[package.scala:16:47] wire _drain_load_T_2; // @[package.scala:16:47] assign _drain_load_T_2 = _GEN_30; // @[package.scala:16:47] wire _r_c_cat_T_48; // @[Consts.scala:91:71] assign _r_c_cat_T_48 = _GEN_30; // @[package.scala:16:47] wire _GEN_31 = _rpq_io_deq_bits_uop_mem_cmd == 5'h7; // @[package.scala:16:47] wire _drain_load_T_3; // @[package.scala:16:47] assign _drain_load_T_3 = _GEN_31; // @[package.scala:16:47] wire _drain_load_T_28; // @[Consts.scala:90:66] assign _drain_load_T_28 = _GEN_31; // @[package.scala:16:47] wire _drain_load_T_4 = _drain_load_T | _drain_load_T_1; // @[package.scala:16:47, :81:59] wire _drain_load_T_5 = _drain_load_T_4 | _drain_load_T_2; // @[package.scala:16:47, :81:59] wire _drain_load_T_6 = _drain_load_T_5 | _drain_load_T_3; // @[package.scala:16:47, :81:59] wire _GEN_32 = _rpq_io_deq_bits_uop_mem_cmd == 5'h4; // @[package.scala:16:47] wire _drain_load_T_7; // @[package.scala:16:47] assign _drain_load_T_7 = _GEN_32; // @[package.scala:16:47] wire _drain_load_T_30; // @[package.scala:16:47] assign _drain_load_T_30 = _GEN_32; // @[package.scala:16:47] wire _GEN_33 = _rpq_io_deq_bits_uop_mem_cmd == 5'h9; // @[package.scala:16:47] wire _drain_load_T_8; // @[package.scala:16:47] assign _drain_load_T_8 = _GEN_33; // @[package.scala:16:47] wire _drain_load_T_31; // @[package.scala:16:47] assign _drain_load_T_31 = _GEN_33; // @[package.scala:16:47] wire _GEN_34 = _rpq_io_deq_bits_uop_mem_cmd == 5'hA; // @[package.scala:16:47] wire _drain_load_T_9; // @[package.scala:16:47] assign _drain_load_T_9 = _GEN_34; // @[package.scala:16:47] wire _drain_load_T_32; // @[package.scala:16:47] assign _drain_load_T_32 = _GEN_34; // @[package.scala:16:47] wire _GEN_35 = _rpq_io_deq_bits_uop_mem_cmd == 5'hB; // @[package.scala:16:47] wire _drain_load_T_10; // @[package.scala:16:47] assign _drain_load_T_10 = _GEN_35; // @[package.scala:16:47] wire _drain_load_T_33; // @[package.scala:16:47] assign _drain_load_T_33 = _GEN_35; // @[package.scala:16:47] wire _drain_load_T_11 = _drain_load_T_7 | _drain_load_T_8; // @[package.scala:16:47, :81:59] wire _drain_load_T_12 = _drain_load_T_11 | _drain_load_T_9; // @[package.scala:16:47, :81:59] wire _drain_load_T_13 = _drain_load_T_12 | _drain_load_T_10; // @[package.scala:16:47, :81:59] wire _GEN_36 = _rpq_io_deq_bits_uop_mem_cmd == 5'h8; // @[package.scala:16:47] wire _drain_load_T_14; // @[package.scala:16:47] assign _drain_load_T_14 = _GEN_36; // @[package.scala:16:47] wire _drain_load_T_37; // @[package.scala:16:47] assign _drain_load_T_37 = _GEN_36; // @[package.scala:16:47] wire _GEN_37 = _rpq_io_deq_bits_uop_mem_cmd == 5'hC; // @[package.scala:16:47] wire _drain_load_T_15; // @[package.scala:16:47] assign _drain_load_T_15 = _GEN_37; // @[package.scala:16:47] wire _drain_load_T_38; // @[package.scala:16:47] assign _drain_load_T_38 = _GEN_37; // @[package.scala:16:47] wire _GEN_38 = _rpq_io_deq_bits_uop_mem_cmd == 5'hD; // @[package.scala:16:47] wire _drain_load_T_16; // @[package.scala:16:47] assign _drain_load_T_16 = _GEN_38; // @[package.scala:16:47] wire _drain_load_T_39; // @[package.scala:16:47] assign _drain_load_T_39 = _GEN_38; // @[package.scala:16:47] wire _GEN_39 = 
_rpq_io_deq_bits_uop_mem_cmd == 5'hE; // @[package.scala:16:47] wire _drain_load_T_17; // @[package.scala:16:47] assign _drain_load_T_17 = _GEN_39; // @[package.scala:16:47] wire _drain_load_T_40; // @[package.scala:16:47] assign _drain_load_T_40 = _GEN_39; // @[package.scala:16:47] wire _GEN_40 = _rpq_io_deq_bits_uop_mem_cmd == 5'hF; // @[package.scala:16:47] wire _drain_load_T_18; // @[package.scala:16:47] assign _drain_load_T_18 = _GEN_40; // @[package.scala:16:47] wire _drain_load_T_41; // @[package.scala:16:47] assign _drain_load_T_41 = _GEN_40; // @[package.scala:16:47] wire _drain_load_T_19 = _drain_load_T_14 | _drain_load_T_15; // @[package.scala:16:47, :81:59] wire _drain_load_T_20 = _drain_load_T_19 | _drain_load_T_16; // @[package.scala:16:47, :81:59] wire _drain_load_T_21 = _drain_load_T_20 | _drain_load_T_17; // @[package.scala:16:47, :81:59] wire _drain_load_T_22 = _drain_load_T_21 | _drain_load_T_18; // @[package.scala:16:47, :81:59] wire _drain_load_T_23 = _drain_load_T_13 | _drain_load_T_22; // @[package.scala:81:59] wire _drain_load_T_24 = _drain_load_T_6 | _drain_load_T_23; // @[package.scala:81:59] wire _drain_load_T_25 = _rpq_io_deq_bits_uop_mem_cmd == 5'h1; // @[Consts.scala:90:32] wire _drain_load_T_26 = _rpq_io_deq_bits_uop_mem_cmd == 5'h11; // @[Consts.scala:90:49] wire _drain_load_T_27 = _drain_load_T_25 | _drain_load_T_26; // @[Consts.scala:90:{32,42,49}] wire _drain_load_T_29 = _drain_load_T_27 | _drain_load_T_28; // @[Consts.scala:90:{42,59,66}] wire _drain_load_T_34 = _drain_load_T_30 | _drain_load_T_31; // @[package.scala:16:47, :81:59] wire _drain_load_T_35 = _drain_load_T_34 | _drain_load_T_32; // @[package.scala:16:47, :81:59] wire _drain_load_T_36 = _drain_load_T_35 | _drain_load_T_33; // @[package.scala:16:47, :81:59] wire _drain_load_T_42 = _drain_load_T_37 | _drain_load_T_38; // @[package.scala:16:47, :81:59] wire _drain_load_T_43 = _drain_load_T_42 | _drain_load_T_39; // @[package.scala:16:47, :81:59] wire _drain_load_T_44 = _drain_load_T_43 | _drain_load_T_40; // @[package.scala:16:47, :81:59] wire _drain_load_T_45 = _drain_load_T_44 | _drain_load_T_41; // @[package.scala:16:47, :81:59] wire _drain_load_T_46 = _drain_load_T_36 | _drain_load_T_45; // @[package.scala:81:59] wire _drain_load_T_47 = _drain_load_T_29 | _drain_load_T_46; // @[Consts.scala:87:44, :90:{59,76}] wire _drain_load_T_48 = ~_drain_load_T_47; // @[Consts.scala:90:76] wire _drain_load_T_49 = _drain_load_T_24 & _drain_load_T_48; // @[Consts.scala:89:68] wire _drain_load_T_50 = _rpq_io_deq_bits_uop_mem_cmd != 5'h6; // @[mshrs.scala:128:19, :259:51] wire drain_load = _drain_load_T_49 & _drain_load_T_50; // @[mshrs.scala:257:59, :258:60, :259:51] wire [5:0] _rp_addr_T = _rpq_io_deq_bits_addr[5:0]; // @[mshrs.scala:128:19, :261:61] wire [33:0] rp_addr = {rp_addr_hi, _rp_addr_T}; // @[mshrs.scala:261:{22,61}] wire [1:0] size; // @[AMOALU.scala:11:18] wire _rpq_io_deq_ready_T = io_resp_ready_0 & io_lb_read_ready_0; // @[mshrs.scala:36:7, :270:45] wire _rpq_io_deq_ready_T_1 = _rpq_io_deq_ready_T & drain_load; // @[mshrs.scala:258:60, :270:{45,65}] wire _io_lb_read_valid_T = _rpq_io_deq_valid & drain_load; // @[mshrs.scala:128:19, :258:60, :271:48] wire [30:0] _io_lb_read_bits_offset_T = _rpq_io_deq_bits_addr[33:3]; // @[mshrs.scala:128:19, :273:52] wire _GEN_41 = io_lb_read_ready_0 & io_lb_read_valid_0; // @[Decoupled.scala:51:35] wire _io_resp_valid_T; // @[Decoupled.scala:51:35] assign _io_resp_valid_T = _GEN_41; // @[Decoupled.scala:51:35] wire _io_refill_valid_T; // 
@[Decoupled.scala:51:35] assign _io_refill_valid_T = _GEN_41; // @[Decoupled.scala:51:35] wire _io_resp_valid_T_1 = _rpq_io_deq_valid & _io_resp_valid_T; // @[Decoupled.scala:51:35] wire _io_resp_valid_T_2 = _io_resp_valid_T_1 & drain_load; // @[mshrs.scala:258:60, :275:{43,62}] wire _GEN_42 = ~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3; // @[package.scala:16:47] assign io_resp_valid_0 = ~_GEN_42 & _io_probe_rdy_T_4 & _io_resp_valid_T_2; // @[package.scala:16:47] wire _io_resp_bits_data_shifted_T = _rpq_io_deq_bits_addr[2]; // @[AMOALU.scala:42:29] wire [31:0] _io_resp_bits_data_shifted_T_1 = data_word[63:32]; // @[AMOALU.scala:42:37] wire [31:0] _io_resp_bits_data_T_5 = data_word[63:32]; // @[AMOALU.scala:42:37, :45:94] wire [31:0] _io_resp_bits_data_shifted_T_2 = data_word[31:0]; // @[AMOALU.scala:42:55] wire [31:0] io_resp_bits_data_shifted = _io_resp_bits_data_shifted_T ? _io_resp_bits_data_shifted_T_1 : _io_resp_bits_data_shifted_T_2; // @[AMOALU.scala:42:{24,29,37,55}] wire [31:0] io_resp_bits_data_zeroed = io_resp_bits_data_shifted; // @[AMOALU.scala:42:24, :44:23] wire _io_resp_bits_data_T = size == 2'h2; // @[AMOALU.scala:11:18, :45:26] wire _io_resp_bits_data_T_1 = _io_resp_bits_data_T; // @[AMOALU.scala:45:{26,34}] wire _io_resp_bits_data_T_2 = io_resp_bits_data_zeroed[31]; // @[AMOALU.scala:44:23, :45:81] wire _io_resp_bits_data_T_3 = _rpq_io_deq_bits_uop_mem_signed & _io_resp_bits_data_T_2; // @[AMOALU.scala:45:{72,81}] wire [31:0] _io_resp_bits_data_T_4 = {32{_io_resp_bits_data_T_3}}; // @[AMOALU.scala:45:{49,72}] wire [31:0] _io_resp_bits_data_T_6 = _io_resp_bits_data_T_1 ? _io_resp_bits_data_T_4 : _io_resp_bits_data_T_5; // @[AMOALU.scala:45:{20,34,49,94}] wire [63:0] _io_resp_bits_data_T_7 = {_io_resp_bits_data_T_6, io_resp_bits_data_zeroed}; // @[AMOALU.scala:44:23, :45:{16,20}] wire _io_resp_bits_data_shifted_T_3 = _rpq_io_deq_bits_addr[1]; // @[AMOALU.scala:42:29] wire [15:0] _io_resp_bits_data_shifted_T_4 = _io_resp_bits_data_T_7[31:16]; // @[AMOALU.scala:42:37, :45:16] wire [15:0] _io_resp_bits_data_shifted_T_5 = _io_resp_bits_data_T_7[15:0]; // @[AMOALU.scala:42:55, :45:16] wire [15:0] io_resp_bits_data_shifted_1 = _io_resp_bits_data_shifted_T_3 ? _io_resp_bits_data_shifted_T_4 : _io_resp_bits_data_shifted_T_5; // @[AMOALU.scala:42:{24,29,37,55}] wire [15:0] io_resp_bits_data_zeroed_1 = io_resp_bits_data_shifted_1; // @[AMOALU.scala:42:24, :44:23] wire _io_resp_bits_data_T_8 = size == 2'h1; // @[AMOALU.scala:11:18, :45:26] wire _io_resp_bits_data_T_9 = _io_resp_bits_data_T_8; // @[AMOALU.scala:45:{26,34}] wire _io_resp_bits_data_T_10 = io_resp_bits_data_zeroed_1[15]; // @[AMOALU.scala:44:23, :45:81] wire _io_resp_bits_data_T_11 = _rpq_io_deq_bits_uop_mem_signed & _io_resp_bits_data_T_10; // @[AMOALU.scala:45:{72,81}] wire [47:0] _io_resp_bits_data_T_12 = {48{_io_resp_bits_data_T_11}}; // @[AMOALU.scala:45:{49,72}] wire [47:0] _io_resp_bits_data_T_13 = _io_resp_bits_data_T_7[63:16]; // @[AMOALU.scala:45:{16,94}] wire [47:0] _io_resp_bits_data_T_14 = _io_resp_bits_data_T_9 ? 
_io_resp_bits_data_T_12 : _io_resp_bits_data_T_13; // @[AMOALU.scala:45:{20,34,49,94}] wire [63:0] _io_resp_bits_data_T_15 = {_io_resp_bits_data_T_14, io_resp_bits_data_zeroed_1}; // @[AMOALU.scala:44:23, :45:{16,20}] wire _io_resp_bits_data_shifted_T_6 = _rpq_io_deq_bits_addr[0]; // @[AMOALU.scala:42:29] wire [7:0] _io_resp_bits_data_shifted_T_7 = _io_resp_bits_data_T_15[15:8]; // @[AMOALU.scala:42:37, :45:16] wire [7:0] _io_resp_bits_data_shifted_T_8 = _io_resp_bits_data_T_15[7:0]; // @[AMOALU.scala:42:55, :45:16] wire [7:0] io_resp_bits_data_shifted_2 = _io_resp_bits_data_shifted_T_6 ? _io_resp_bits_data_shifted_T_7 : _io_resp_bits_data_shifted_T_8; // @[AMOALU.scala:42:{24,29,37,55}] wire [7:0] io_resp_bits_data_zeroed_2 = io_resp_bits_data_shifted_2; // @[AMOALU.scala:42:24, :44:23] wire _io_resp_bits_data_T_16 = size == 2'h0; // @[AMOALU.scala:11:18, :45:26] wire _io_resp_bits_data_T_17 = _io_resp_bits_data_T_16; // @[AMOALU.scala:45:{26,34}] wire _io_resp_bits_data_T_18 = io_resp_bits_data_zeroed_2[7]; // @[AMOALU.scala:44:23, :45:81] wire _io_resp_bits_data_T_19 = _rpq_io_deq_bits_uop_mem_signed & _io_resp_bits_data_T_18; // @[AMOALU.scala:45:{72,81}] wire [55:0] _io_resp_bits_data_T_20 = {56{_io_resp_bits_data_T_19}}; // @[AMOALU.scala:45:{49,72}] wire [55:0] _io_resp_bits_data_T_21 = _io_resp_bits_data_T_15[63:8]; // @[AMOALU.scala:45:{16,94}] wire [55:0] _io_resp_bits_data_T_22 = _io_resp_bits_data_T_17 ? _io_resp_bits_data_T_20 : _io_resp_bits_data_T_21; // @[AMOALU.scala:45:{20,34,49,94}] assign _io_resp_bits_data_T_23 = {_io_resp_bits_data_T_22, io_resp_bits_data_zeroed_2}; // @[AMOALU.scala:44:23, :45:{16,20}] assign io_resp_bits_data_0 = _io_resp_bits_data_T_23; // @[AMOALU.scala:45:16] wire _T_26 = rpq_io_deq_ready & _rpq_io_deq_valid; // @[Decoupled.scala:51:35] wire _T_28 = _rpq_io_empty & ~commit_line; // @[mshrs.scala:128:19, :140:24, :282:{31,34}] wire _T_33 = _rpq_io_empty | _rpq_io_deq_valid & ~drain_load; // @[mshrs.scala:128:19, :258:60, :288:{31,52,55}] assign io_commit_val_0 = ~_GEN_42 & _io_probe_rdy_T_4 & ~(_T_26 | _T_28) & _T_33; // @[Decoupled.scala:51:35] wire _io_meta_read_valid_T = ~io_prober_state_valid_0; // @[mshrs.scala:36:7, :295:27] wire _io_meta_read_valid_T_1 = ~grantack_valid; // @[mshrs.scala:138:21, :295:53] wire _io_meta_read_valid_T_2 = _io_meta_read_valid_T | _io_meta_read_valid_T_1; // @[mshrs.scala:295:{27,50,53}] wire [3:0] _io_meta_read_valid_T_3 = io_prober_state_bits_0[9:6]; // @[mshrs.scala:36:7, :295:93] wire _io_meta_read_valid_T_4 = _io_meta_read_valid_T_3 != req_idx; // @[mshrs.scala:110:25, :295:{93,120}] wire _io_meta_read_valid_T_5 = _io_meta_read_valid_T_2 | _io_meta_read_valid_T_4; // @[mshrs.scala:295:{50,69,120}] assign io_meta_read_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4) & _io_probe_rdy_T_8 & _io_meta_read_valid_T_5; // @[package.scala:16:47] assign io_meta_write_bits_data_tag_0 = req_tag[21:0]; // @[mshrs.scala:36:7, :111:26, :297:27] assign io_meta_read_bits_tag_0 = req_tag[21:0]; // @[mshrs.scala:36:7, :111:26, :297:27] wire _T_36 = state == 5'h5; // @[mshrs.scala:107:22, :302:22] wire _T_37 = state == 5'h6; // @[mshrs.scala:107:22, :304:22] wire [3:0] _needs_wb_r_T_6 = {2'h2, io_meta_resp_bits_coh_state_0}; // @[Metadata.scala:120:19] wire _needs_wb_r_T_19 = _needs_wb_r_T_6 == 4'h8; // @[Misc.scala:56:20] wire [2:0] _needs_wb_r_T_21 = _needs_wb_r_T_19 ? 
3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_23 = _needs_wb_r_T_6 == 4'h9; // @[Misc.scala:56:20] wire [2:0] _needs_wb_r_T_25 = _needs_wb_r_T_23 ? 3'h2 : _needs_wb_r_T_21; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_27 = _needs_wb_r_T_6 == 4'hA; // @[Misc.scala:56:20] wire [2:0] _needs_wb_r_T_29 = _needs_wb_r_T_27 ? 3'h1 : _needs_wb_r_T_25; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_31 = _needs_wb_r_T_6 == 4'hB; // @[Misc.scala:56:20] wire _needs_wb_r_T_32 = _needs_wb_r_T_31; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_33 = _needs_wb_r_T_31 ? 3'h1 : _needs_wb_r_T_29; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_35 = _needs_wb_r_T_6 == 4'h4; // @[Misc.scala:56:20] wire _needs_wb_r_T_36 = ~_needs_wb_r_T_35 & _needs_wb_r_T_32; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_37 = _needs_wb_r_T_35 ? 3'h5 : _needs_wb_r_T_33; // @[Misc.scala:38:36, :56:20] wire _needs_wb_r_T_39 = _needs_wb_r_T_6 == 4'h5; // @[Misc.scala:56:20] wire _needs_wb_r_T_40 = ~_needs_wb_r_T_39 & _needs_wb_r_T_36; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_41 = _needs_wb_r_T_39 ? 3'h4 : _needs_wb_r_T_37; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_42 = {1'h0, _needs_wb_r_T_39}; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_43 = _needs_wb_r_T_6 == 4'h6; // @[Misc.scala:56:20] wire _needs_wb_r_T_44 = ~_needs_wb_r_T_43 & _needs_wb_r_T_40; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_45 = _needs_wb_r_T_43 ? 3'h0 : _needs_wb_r_T_41; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_46 = _needs_wb_r_T_43 ? 2'h1 : _needs_wb_r_T_42; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_47 = _needs_wb_r_T_6 == 4'h7; // @[Misc.scala:56:20] wire _needs_wb_r_T_48 = _needs_wb_r_T_47 | _needs_wb_r_T_44; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_49 = _needs_wb_r_T_47 ? 3'h0 : _needs_wb_r_T_45; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_50 = _needs_wb_r_T_47 ? 2'h1 : _needs_wb_r_T_46; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_51 = _needs_wb_r_T_6 == 4'h0; // @[Misc.scala:56:20] wire _needs_wb_r_T_52 = ~_needs_wb_r_T_51 & _needs_wb_r_T_48; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_53 = _needs_wb_r_T_51 ? 3'h5 : _needs_wb_r_T_49; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_54 = _needs_wb_r_T_51 ? 2'h0 : _needs_wb_r_T_50; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_55 = _needs_wb_r_T_6 == 4'h1; // @[Misc.scala:56:20] wire _needs_wb_r_T_56 = ~_needs_wb_r_T_55 & _needs_wb_r_T_52; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_57 = _needs_wb_r_T_55 ? 3'h4 : _needs_wb_r_T_53; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_58 = _needs_wb_r_T_55 ? 2'h1 : _needs_wb_r_T_54; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_59 = _needs_wb_r_T_6 == 4'h2; // @[Misc.scala:56:20] wire _needs_wb_r_T_60 = ~_needs_wb_r_T_59 & _needs_wb_r_T_56; // @[Misc.scala:38:9, :56:20] wire [2:0] _needs_wb_r_T_61 = _needs_wb_r_T_59 ? 3'h3 : _needs_wb_r_T_57; // @[Misc.scala:38:36, :56:20] wire [1:0] _needs_wb_r_T_62 = _needs_wb_r_T_59 ? 2'h2 : _needs_wb_r_T_58; // @[Misc.scala:38:63, :56:20] wire _needs_wb_r_T_63 = _needs_wb_r_T_6 == 4'h3; // @[Misc.scala:56:20] wire needs_wb = _needs_wb_r_T_63 | _needs_wb_r_T_60; // @[Misc.scala:38:9, :56:20] wire [2:0] needs_wb_r_2 = _needs_wb_r_T_63 ? 3'h3 : _needs_wb_r_T_61; // @[Misc.scala:38:36, :56:20] wire [1:0] needs_wb_r_3 = _needs_wb_r_T_63 ? 
2'h2 : _needs_wb_r_T_62; // @[Misc.scala:38:63, :56:20] wire [1:0] needs_wb_meta_state = needs_wb_r_3; // @[Misc.scala:38:63] wire _state_T_30 = ~io_meta_resp_valid_0; // @[mshrs.scala:36:7, :306:18] wire [4:0] _state_T_31 = needs_wb ? 5'h7 : 5'hB; // @[Misc.scala:38:9] wire [4:0] _state_T_32 = _state_T_30 ? 5'h4 : _state_T_31; // @[mshrs.scala:306:{17,18}, :307:17] wire _T_38 = state == 5'h7; // @[mshrs.scala:107:22, :308:22] wire _T_40 = state == 5'h9; // @[mshrs.scala:107:22, :318:22] assign io_wb_req_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4 | _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38) & _T_40; // @[package.scala:16:47] wire _T_42 = state == 5'hA; // @[mshrs.scala:107:22, :330:22] wire _T_43 = state == 5'hB; // @[mshrs.scala:107:22, :334:22] wire _GEN_43 = _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _T_40 | _T_42; // @[mshrs.scala:148:129, :179:26, :294:39, :302:{22,41}, :304:{22,41}, :308:{22,40}, :318:{22,36}, :330:{22,37}, :334:41] assign io_lb_read_valid_0 = ~_GEN_42 & (_io_probe_rdy_T_4 ? _io_lb_read_valid_T : ~_GEN_43 & _T_43); // @[package.scala:16:47] assign io_lb_read_bits_offset_0 = _io_probe_rdy_T_4 ? _io_lb_read_bits_offset_T[2:0] : refill_ctr; // @[package.scala:16:47] wire _GEN_44 = _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4 | _GEN_43; // @[package.scala:16:47] assign io_refill_valid_0 = ~(~(|state) | _GEN_44) & _T_43 & _io_refill_valid_T; // @[Decoupled.scala:51:35] wire [5:0] _io_refill_bits_addr_T = {refill_ctr, 3'h0}; // @[mshrs.scala:139:24, :340:59] wire [33:0] _io_refill_bits_addr_T_1 = {req_block_addr[33:6], req_block_addr[5:0] | _io_refill_bits_addr_T}; // @[mshrs.scala:112:51, :340:{45,59}] assign io_refill_bits_addr_0 = _io_refill_bits_addr_T_1[9:0]; // @[mshrs.scala:36:7, :340:{27,45}] wire [3:0] _refill_ctr_T = {1'h0, refill_ctr} + 4'h1; // @[mshrs.scala:139:24, :345:32] wire [2:0] _refill_ctr_T_1 = _refill_ctr_T[2:0]; // @[mshrs.scala:345:32] wire _T_46 = state == 5'hC; // @[mshrs.scala:107:22, :350:22] wire _GEN_45 = _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _T_40 | _T_42 | _T_43; // @[mshrs.scala:148:129, :164:26, :294:39, :302:{22,41}, :304:{22,41}, :308:{22,40}, :318:{22,36}, :330:{22,37}, :334:{22,41}, :350:39] wire _GEN_46 = _io_probe_rdy_T_4 | _GEN_45; // @[package.scala:16:47] assign io_replay_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _GEN_46) & _T_46 & _rpq_io_deq_valid; // @[package.scala:16:47] assign rpq_io_deq_ready = ~_GEN_42 & (_io_probe_rdy_T_4 ? 
_rpq_io_deq_ready_T_1 : ~_GEN_45 & _T_46 & io_replay_ready_0); // @[package.scala:16:47] wire [5:0] _io_replay_bits_addr_T = _rpq_io_deq_bits_addr[5:0]; // @[mshrs.scala:128:19, :353:70] assign _io_replay_bits_addr_T_1 = {io_replay_bits_addr_hi, _io_replay_bits_addr_T}; // @[mshrs.scala:353:{31,70}] assign io_replay_bits_addr_0 = _io_replay_bits_addr_T_1; // @[mshrs.scala:36:7, :353:31] wire _T_48 = _rpq_io_deq_bits_uop_mem_cmd == 5'h1; // @[Consts.scala:90:32] wire _r_c_cat_T; // @[Consts.scala:90:32] assign _r_c_cat_T = _T_48; // @[Consts.scala:90:32] wire _r_c_cat_T_23; // @[Consts.scala:90:32] assign _r_c_cat_T_23 = _T_48; // @[Consts.scala:90:32] wire _T_49 = _rpq_io_deq_bits_uop_mem_cmd == 5'h11; // @[Consts.scala:90:49] wire _r_c_cat_T_1; // @[Consts.scala:90:49] assign _r_c_cat_T_1 = _T_49; // @[Consts.scala:90:49] wire _r_c_cat_T_24; // @[Consts.scala:90:49] assign _r_c_cat_T_24 = _T_49; // @[Consts.scala:90:49] wire _T_51 = _rpq_io_deq_bits_uop_mem_cmd == 5'h7; // @[Consts.scala:90:66] wire _r_c_cat_T_3; // @[Consts.scala:90:66] assign _r_c_cat_T_3 = _T_51; // @[Consts.scala:90:66] wire _r_c_cat_T_26; // @[Consts.scala:90:66] assign _r_c_cat_T_26 = _T_51; // @[Consts.scala:90:66] wire _T_53 = _rpq_io_deq_bits_uop_mem_cmd == 5'h4; // @[package.scala:16:47] wire _r_c_cat_T_5; // @[package.scala:16:47] assign _r_c_cat_T_5 = _T_53; // @[package.scala:16:47] wire _r_c_cat_T_28; // @[package.scala:16:47] assign _r_c_cat_T_28 = _T_53; // @[package.scala:16:47] wire _T_54 = _rpq_io_deq_bits_uop_mem_cmd == 5'h9; // @[package.scala:16:47] wire _r_c_cat_T_6; // @[package.scala:16:47] assign _r_c_cat_T_6 = _T_54; // @[package.scala:16:47] wire _r_c_cat_T_29; // @[package.scala:16:47] assign _r_c_cat_T_29 = _T_54; // @[package.scala:16:47] wire _T_55 = _rpq_io_deq_bits_uop_mem_cmd == 5'hA; // @[package.scala:16:47] wire _r_c_cat_T_7; // @[package.scala:16:47] assign _r_c_cat_T_7 = _T_55; // @[package.scala:16:47] wire _r_c_cat_T_30; // @[package.scala:16:47] assign _r_c_cat_T_30 = _T_55; // @[package.scala:16:47] wire _T_56 = _rpq_io_deq_bits_uop_mem_cmd == 5'hB; // @[package.scala:16:47] wire _r_c_cat_T_8; // @[package.scala:16:47] assign _r_c_cat_T_8 = _T_56; // @[package.scala:16:47] wire _r_c_cat_T_31; // @[package.scala:16:47] assign _r_c_cat_T_31 = _T_56; // @[package.scala:16:47] wire _T_60 = _rpq_io_deq_bits_uop_mem_cmd == 5'h8; // @[package.scala:16:47] wire _r_c_cat_T_12; // @[package.scala:16:47] assign _r_c_cat_T_12 = _T_60; // @[package.scala:16:47] wire _r_c_cat_T_35; // @[package.scala:16:47] assign _r_c_cat_T_35 = _T_60; // @[package.scala:16:47] wire _T_61 = _rpq_io_deq_bits_uop_mem_cmd == 5'hC; // @[package.scala:16:47] wire _r_c_cat_T_13; // @[package.scala:16:47] assign _r_c_cat_T_13 = _T_61; // @[package.scala:16:47] wire _r_c_cat_T_36; // @[package.scala:16:47] assign _r_c_cat_T_36 = _T_61; // @[package.scala:16:47] wire _T_62 = _rpq_io_deq_bits_uop_mem_cmd == 5'hD; // @[package.scala:16:47] wire _r_c_cat_T_14; // @[package.scala:16:47] assign _r_c_cat_T_14 = _T_62; // @[package.scala:16:47] wire _r_c_cat_T_37; // @[package.scala:16:47] assign _r_c_cat_T_37 = _T_62; // @[package.scala:16:47] wire _T_63 = _rpq_io_deq_bits_uop_mem_cmd == 5'hE; // @[package.scala:16:47] wire _r_c_cat_T_15; // @[package.scala:16:47] assign _r_c_cat_T_15 = _T_63; // @[package.scala:16:47] wire _r_c_cat_T_38; // @[package.scala:16:47] assign _r_c_cat_T_38 = _T_63; // @[package.scala:16:47] wire _T_64 = _rpq_io_deq_bits_uop_mem_cmd == 5'hF; // @[package.scala:16:47] wire _r_c_cat_T_16; // 
@[package.scala:16:47] assign _r_c_cat_T_16 = _T_64; // @[package.scala:16:47] wire _r_c_cat_T_39; // @[package.scala:16:47] assign _r_c_cat_T_39 = _T_64; // @[package.scala:16:47] wire _T_71 = io_replay_ready_0 & io_replay_valid_0 & (_T_48 | _T_49 | _T_51 | _T_53 | _T_54 | _T_55 | _T_56 | _T_60 | _T_61 | _T_62 | _T_63 | _T_64); // @[Decoupled.scala:51:35] wire _r_c_cat_T_2 = _r_c_cat_T | _r_c_cat_T_1; // @[Consts.scala:90:{32,42,49}] wire _r_c_cat_T_4 = _r_c_cat_T_2 | _r_c_cat_T_3; // @[Consts.scala:90:{42,59,66}] wire _r_c_cat_T_9 = _r_c_cat_T_5 | _r_c_cat_T_6; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_10 = _r_c_cat_T_9 | _r_c_cat_T_7; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_11 = _r_c_cat_T_10 | _r_c_cat_T_8; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_17 = _r_c_cat_T_12 | _r_c_cat_T_13; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_18 = _r_c_cat_T_17 | _r_c_cat_T_14; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_19 = _r_c_cat_T_18 | _r_c_cat_T_15; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_20 = _r_c_cat_T_19 | _r_c_cat_T_16; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_21 = _r_c_cat_T_11 | _r_c_cat_T_20; // @[package.scala:81:59] wire _r_c_cat_T_22 = _r_c_cat_T_4 | _r_c_cat_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _r_c_cat_T_25 = _r_c_cat_T_23 | _r_c_cat_T_24; // @[Consts.scala:90:{32,42,49}] wire _r_c_cat_T_27 = _r_c_cat_T_25 | _r_c_cat_T_26; // @[Consts.scala:90:{42,59,66}] wire _r_c_cat_T_32 = _r_c_cat_T_28 | _r_c_cat_T_29; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_33 = _r_c_cat_T_32 | _r_c_cat_T_30; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_34 = _r_c_cat_T_33 | _r_c_cat_T_31; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_40 = _r_c_cat_T_35 | _r_c_cat_T_36; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_41 = _r_c_cat_T_40 | _r_c_cat_T_37; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_42 = _r_c_cat_T_41 | _r_c_cat_T_38; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_43 = _r_c_cat_T_42 | _r_c_cat_T_39; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_44 = _r_c_cat_T_34 | _r_c_cat_T_43; // @[package.scala:81:59] wire _r_c_cat_T_45 = _r_c_cat_T_27 | _r_c_cat_T_44; // @[Consts.scala:87:44, :90:{59,76}] wire _r_c_cat_T_46 = _rpq_io_deq_bits_uop_mem_cmd == 5'h3; // @[Consts.scala:91:54] wire _r_c_cat_T_47 = _r_c_cat_T_45 | _r_c_cat_T_46; // @[Consts.scala:90:76, :91:{47,54}] wire _r_c_cat_T_49 = _r_c_cat_T_47 | _r_c_cat_T_48; // @[Consts.scala:91:{47,64,71}] wire [1:0] r_c = {_r_c_cat_T_22, _r_c_cat_T_49}; // @[Metadata.scala:29:18] wire [3:0] _r_T_64 = {r_c, new_coh_state}; // @[Metadata.scala:29:18, :58:19] wire _r_T_89 = _r_T_64 == 4'hC; // @[Misc.scala:49:20] wire [1:0] _r_T_91 = {1'h0, _r_T_89}; // @[Misc.scala:35:36, :49:20] wire _r_T_92 = _r_T_64 == 4'hD; // @[Misc.scala:49:20] wire [1:0] _r_T_94 = _r_T_92 ? 2'h2 : _r_T_91; // @[Misc.scala:35:36, :49:20] wire _r_T_95 = _r_T_64 == 4'h4; // @[Misc.scala:49:20] wire [1:0] _r_T_97 = _r_T_95 ? 2'h1 : _r_T_94; // @[Misc.scala:35:36, :49:20] wire _r_T_98 = _r_T_64 == 4'h5; // @[Misc.scala:49:20] wire [1:0] _r_T_100 = _r_T_98 ? 2'h2 : _r_T_97; // @[Misc.scala:35:36, :49:20] wire _r_T_101 = _r_T_64 == 4'h0; // @[Misc.scala:49:20] wire [1:0] _r_T_103 = _r_T_101 ? 2'h0 : _r_T_100; // @[Misc.scala:35:36, :49:20] wire _r_T_104 = _r_T_64 == 4'hE; // @[Misc.scala:49:20] wire _r_T_105 = _r_T_104; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_106 = _r_T_104 ? 
2'h3 : _r_T_103; // @[Misc.scala:35:36, :49:20] wire _r_T_107 = &_r_T_64; // @[Misc.scala:49:20] wire _r_T_108 = _r_T_107 | _r_T_105; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_109 = _r_T_107 ? 2'h3 : _r_T_106; // @[Misc.scala:35:36, :49:20] wire _r_T_110 = _r_T_64 == 4'h6; // @[Misc.scala:49:20] wire _r_T_111 = _r_T_110 | _r_T_108; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_112 = _r_T_110 ? 2'h2 : _r_T_109; // @[Misc.scala:35:36, :49:20] wire _r_T_113 = _r_T_64 == 4'h7; // @[Misc.scala:49:20] wire _r_T_114 = _r_T_113 | _r_T_111; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_115 = _r_T_113 ? 2'h3 : _r_T_112; // @[Misc.scala:35:36, :49:20] wire _r_T_116 = _r_T_64 == 4'h1; // @[Misc.scala:49:20] wire _r_T_117 = _r_T_116 | _r_T_114; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_118 = _r_T_116 ? 2'h1 : _r_T_115; // @[Misc.scala:35:36, :49:20] wire _r_T_119 = _r_T_64 == 4'h2; // @[Misc.scala:49:20] wire _r_T_120 = _r_T_119 | _r_T_117; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_121 = _r_T_119 ? 2'h2 : _r_T_118; // @[Misc.scala:35:36, :49:20] wire _r_T_122 = _r_T_64 == 4'h3; // @[Misc.scala:49:20] wire is_hit = _r_T_122 | _r_T_120; // @[Misc.scala:35:9, :49:20] wire [1:0] r_2_1 = _r_T_122 ? 2'h3 : _r_T_121; // @[Misc.scala:35:36, :49:20] wire [1:0] coh_on_hit_state = r_2_1; // @[Misc.scala:35:36] wire _GEN_47 = _T_40 | _T_42 | _T_43 | _T_46; // @[mshrs.scala:156:26, :318:{22,36}, :330:{22,37}, :334:{22,41}, :350:{22,39}, :363:44] assign io_meta_write_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _io_probe_rdy_T_4 | _io_probe_rdy_T_8 | _T_36 | _T_37) & (_T_38 | ~_GEN_47 & _sec_rdy_T_4); // @[package.scala:16:47] assign io_meta_write_bits_data_coh_state_0 = _T_38 ? coh_on_clear_state : new_coh_state; // @[Metadata.scala:160:20] wire _GEN_48 = _io_probe_rdy_T_4 | _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _T_40 | _T_42 | _T_43 | _T_46 | _sec_rdy_T_4; // @[package.scala:16:47] assign io_mem_finish_valid_0 = ~(~(|state) | _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _GEN_48) & _sec_rdy_T_5 & grantack_valid; // @[package.scala:16:47] wire [4:0] _state_T_33 = finish_to_prefetch ? 
5'h11 : 5'h0; // @[mshrs.scala:142:31, :381:17] wire _GEN_49 = _sec_rdy_T_4 | _sec_rdy_T_5 | _sec_rdy_T_6; // @[package.scala:16:47] wire _GEN_50 = _T_46 | _GEN_49; // @[mshrs.scala:158:26, :350:{22,39}, :363:44, :373:42, :380:42, :382:38] wire _GEN_51 = _io_probe_rdy_T_4 | _io_probe_rdy_T_8 | _T_36 | _T_37 | _T_38 | _T_40 | _T_42 | _T_43 | _GEN_50; // @[package.scala:16:47] wire _GEN_52 = _io_probe_rdy_T_2 | _io_probe_rdy_T_3 | _GEN_51; // @[package.scala:16:47] assign io_req_pri_rdy_0 = ~(|state) | ~_GEN_52 & _io_way_valid_T_1; // @[package.scala:16:47] wire _T_87 = io_req_sec_val_0 & ~io_req_sec_rdy_0 | io_clear_prefetch_0; // @[mshrs.scala:36:7, :384:{27,30,47}] wire _r_c_cat_T_52 = _r_c_cat_T_50 | _r_c_cat_T_51; // @[Consts.scala:90:{32,42,49}] wire _r_c_cat_T_54 = _r_c_cat_T_52 | _r_c_cat_T_53; // @[Consts.scala:90:{42,59,66}] wire _r_c_cat_T_59 = _r_c_cat_T_55 | _r_c_cat_T_56; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_60 = _r_c_cat_T_59 | _r_c_cat_T_57; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_61 = _r_c_cat_T_60 | _r_c_cat_T_58; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_67 = _r_c_cat_T_62 | _r_c_cat_T_63; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_68 = _r_c_cat_T_67 | _r_c_cat_T_64; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_69 = _r_c_cat_T_68 | _r_c_cat_T_65; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_70 = _r_c_cat_T_69 | _r_c_cat_T_66; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_71 = _r_c_cat_T_61 | _r_c_cat_T_70; // @[package.scala:81:59] wire _r_c_cat_T_72 = _r_c_cat_T_54 | _r_c_cat_T_71; // @[Consts.scala:87:44, :90:{59,76}] wire _r_c_cat_T_75 = _r_c_cat_T_73 | _r_c_cat_T_74; // @[Consts.scala:90:{32,42,49}] wire _r_c_cat_T_77 = _r_c_cat_T_75 | _r_c_cat_T_76; // @[Consts.scala:90:{42,59,66}] wire _r_c_cat_T_82 = _r_c_cat_T_78 | _r_c_cat_T_79; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_83 = _r_c_cat_T_82 | _r_c_cat_T_80; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_84 = _r_c_cat_T_83 | _r_c_cat_T_81; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_90 = _r_c_cat_T_85 | _r_c_cat_T_86; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_91 = _r_c_cat_T_90 | _r_c_cat_T_87; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_92 = _r_c_cat_T_91 | _r_c_cat_T_88; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_93 = _r_c_cat_T_92 | _r_c_cat_T_89; // @[package.scala:16:47, :81:59] wire _r_c_cat_T_94 = _r_c_cat_T_84 | _r_c_cat_T_93; // @[package.scala:81:59] wire _r_c_cat_T_95 = _r_c_cat_T_77 | _r_c_cat_T_94; // @[Consts.scala:87:44, :90:{59,76}] wire _r_c_cat_T_97 = _r_c_cat_T_95 | _r_c_cat_T_96; // @[Consts.scala:90:76, :91:{47,54}] wire _r_c_cat_T_99 = _r_c_cat_T_97 | _r_c_cat_T_98; // @[Consts.scala:91:{47,64,71}] wire [1:0] r_c_1 = {_r_c_cat_T_72, _r_c_cat_T_99}; // @[Metadata.scala:29:18] wire [3:0] _r_T_123 = {r_c_1, new_coh_state}; // @[Metadata.scala:29:18, :58:19] wire _r_T_148 = _r_T_123 == 4'hC; // @[Misc.scala:49:20] wire [1:0] _r_T_150 = {1'h0, _r_T_148}; // @[Misc.scala:35:36, :49:20] wire _r_T_151 = _r_T_123 == 4'hD; // @[Misc.scala:49:20] wire [1:0] _r_T_153 = _r_T_151 ? 2'h2 : _r_T_150; // @[Misc.scala:35:36, :49:20] wire _r_T_154 = _r_T_123 == 4'h4; // @[Misc.scala:49:20] wire [1:0] _r_T_156 = _r_T_154 ? 2'h1 : _r_T_153; // @[Misc.scala:35:36, :49:20] wire _r_T_157 = _r_T_123 == 4'h5; // @[Misc.scala:49:20] wire [1:0] _r_T_159 = _r_T_157 ? 2'h2 : _r_T_156; // @[Misc.scala:35:36, :49:20] wire _r_T_160 = _r_T_123 == 4'h0; // @[Misc.scala:49:20] wire [1:0] _r_T_162 = _r_T_160 ? 
2'h0 : _r_T_159; // @[Misc.scala:35:36, :49:20] wire _r_T_163 = _r_T_123 == 4'hE; // @[Misc.scala:49:20] wire _r_T_164 = _r_T_163; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_165 = _r_T_163 ? 2'h3 : _r_T_162; // @[Misc.scala:35:36, :49:20] wire _r_T_166 = &_r_T_123; // @[Misc.scala:49:20] wire _r_T_167 = _r_T_166 | _r_T_164; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_168 = _r_T_166 ? 2'h3 : _r_T_165; // @[Misc.scala:35:36, :49:20] wire _r_T_169 = _r_T_123 == 4'h6; // @[Misc.scala:49:20] wire _r_T_170 = _r_T_169 | _r_T_167; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_171 = _r_T_169 ? 2'h2 : _r_T_168; // @[Misc.scala:35:36, :49:20] wire _r_T_172 = _r_T_123 == 4'h7; // @[Misc.scala:49:20] wire _r_T_173 = _r_T_172 | _r_T_170; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_174 = _r_T_172 ? 2'h3 : _r_T_171; // @[Misc.scala:35:36, :49:20] wire _r_T_175 = _r_T_123 == 4'h1; // @[Misc.scala:49:20] wire _r_T_176 = _r_T_175 | _r_T_173; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_177 = _r_T_175 ? 2'h1 : _r_T_174; // @[Misc.scala:35:36, :49:20] wire _r_T_178 = _r_T_123 == 4'h2; // @[Misc.scala:49:20] wire _r_T_179 = _r_T_178 | _r_T_176; // @[Misc.scala:35:9, :49:20] wire [1:0] _r_T_180 = _r_T_178 ? 2'h2 : _r_T_177; // @[Misc.scala:35:36, :49:20] wire _r_T_181 = _r_T_123 == 4'h3; // @[Misc.scala:49:20] wire is_hit_1 = _r_T_181 | _r_T_179; // @[Misc.scala:35:9, :49:20] wire [1:0] r_2_2 = _r_T_181 ? 2'h3 : _r_T_180; // @[Misc.scala:35:36, :49:20] wire [1:0] coh_on_hit_1_state = r_2_2; // @[Misc.scala:35:36] wire [4:0] state_new_state_1; // @[mshrs.scala:191:29] wire _state_T_35 = ~_state_T_34; // @[mshrs.scala:194:11] wire _state_T_36 = ~_rpq_io_enq_ready; // @[mshrs.scala:128:19, :194:11] wire _state_req_needs_wb_r_T_83 = _state_req_needs_wb_r_T_70 == 4'h8; // @[Misc.scala:56:20] wire [2:0] _state_req_needs_wb_r_T_85 = _state_req_needs_wb_r_T_83 ? 3'h5 : 3'h0; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_87 = _state_req_needs_wb_r_T_70 == 4'h9; // @[Misc.scala:56:20] wire [2:0] _state_req_needs_wb_r_T_89 = _state_req_needs_wb_r_T_87 ? 3'h2 : _state_req_needs_wb_r_T_85; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_91 = _state_req_needs_wb_r_T_70 == 4'hA; // @[Misc.scala:56:20] wire [2:0] _state_req_needs_wb_r_T_93 = _state_req_needs_wb_r_T_91 ? 3'h1 : _state_req_needs_wb_r_T_89; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_95 = _state_req_needs_wb_r_T_70 == 4'hB; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_96 = _state_req_needs_wb_r_T_95; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_97 = _state_req_needs_wb_r_T_95 ? 3'h1 : _state_req_needs_wb_r_T_93; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_99 = _state_req_needs_wb_r_T_70 == 4'h4; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_100 = ~_state_req_needs_wb_r_T_99 & _state_req_needs_wb_r_T_96; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_101 = _state_req_needs_wb_r_T_99 ? 3'h5 : _state_req_needs_wb_r_T_97; // @[Misc.scala:38:36, :56:20] wire _state_req_needs_wb_r_T_103 = _state_req_needs_wb_r_T_70 == 4'h5; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_104 = ~_state_req_needs_wb_r_T_103 & _state_req_needs_wb_r_T_100; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_105 = _state_req_needs_wb_r_T_103 ? 
3'h4 : _state_req_needs_wb_r_T_101; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_106 = {1'h0, _state_req_needs_wb_r_T_103}; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_107 = _state_req_needs_wb_r_T_70 == 4'h6; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_108 = ~_state_req_needs_wb_r_T_107 & _state_req_needs_wb_r_T_104; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_109 = _state_req_needs_wb_r_T_107 ? 3'h0 : _state_req_needs_wb_r_T_105; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_110 = _state_req_needs_wb_r_T_107 ? 2'h1 : _state_req_needs_wb_r_T_106; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_111 = _state_req_needs_wb_r_T_70 == 4'h7; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_112 = _state_req_needs_wb_r_T_111 | _state_req_needs_wb_r_T_108; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_113 = _state_req_needs_wb_r_T_111 ? 3'h0 : _state_req_needs_wb_r_T_109; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_114 = _state_req_needs_wb_r_T_111 ? 2'h1 : _state_req_needs_wb_r_T_110; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_115 = _state_req_needs_wb_r_T_70 == 4'h0; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_116 = ~_state_req_needs_wb_r_T_115 & _state_req_needs_wb_r_T_112; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_117 = _state_req_needs_wb_r_T_115 ? 3'h5 : _state_req_needs_wb_r_T_113; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_118 = _state_req_needs_wb_r_T_115 ? 2'h0 : _state_req_needs_wb_r_T_114; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_119 = _state_req_needs_wb_r_T_70 == 4'h1; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_120 = ~_state_req_needs_wb_r_T_119 & _state_req_needs_wb_r_T_116; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_121 = _state_req_needs_wb_r_T_119 ? 3'h4 : _state_req_needs_wb_r_T_117; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_122 = _state_req_needs_wb_r_T_119 ? 2'h1 : _state_req_needs_wb_r_T_118; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_123 = _state_req_needs_wb_r_T_70 == 4'h2; // @[Misc.scala:56:20] wire _state_req_needs_wb_r_T_124 = ~_state_req_needs_wb_r_T_123 & _state_req_needs_wb_r_T_120; // @[Misc.scala:38:9, :56:20] wire [2:0] _state_req_needs_wb_r_T_125 = _state_req_needs_wb_r_T_123 ? 3'h3 : _state_req_needs_wb_r_T_121; // @[Misc.scala:38:36, :56:20] wire [1:0] _state_req_needs_wb_r_T_126 = _state_req_needs_wb_r_T_123 ? 2'h2 : _state_req_needs_wb_r_T_122; // @[Misc.scala:38:63, :56:20] wire _state_req_needs_wb_r_T_127 = _state_req_needs_wb_r_T_70 == 4'h3; // @[Misc.scala:56:20] wire state_req_needs_wb_r_1_1 = _state_req_needs_wb_r_T_127 | _state_req_needs_wb_r_T_124; // @[Misc.scala:38:9, :56:20] wire [2:0] state_req_needs_wb_r_2_1 = _state_req_needs_wb_r_T_127 ? 3'h3 : _state_req_needs_wb_r_T_125; // @[Misc.scala:38:36, :56:20] wire [1:0] state_req_needs_wb_r_3_1 = _state_req_needs_wb_r_T_127 ? 
2'h2 : _state_req_needs_wb_r_T_126; // @[Misc.scala:38:63, :56:20] wire [1:0] state_req_needs_wb_meta_1_state = state_req_needs_wb_r_3_1; // @[Misc.scala:38:63] wire _state_r_c_cat_T_52 = _state_r_c_cat_T_50 | _state_r_c_cat_T_51; // @[Consts.scala:90:{32,42,49}] wire _state_r_c_cat_T_54 = _state_r_c_cat_T_52 | _state_r_c_cat_T_53; // @[Consts.scala:90:{42,59,66}] wire _state_r_c_cat_T_59 = _state_r_c_cat_T_55 | _state_r_c_cat_T_56; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_60 = _state_r_c_cat_T_59 | _state_r_c_cat_T_57; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_61 = _state_r_c_cat_T_60 | _state_r_c_cat_T_58; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_67 = _state_r_c_cat_T_62 | _state_r_c_cat_T_63; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_68 = _state_r_c_cat_T_67 | _state_r_c_cat_T_64; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_69 = _state_r_c_cat_T_68 | _state_r_c_cat_T_65; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_70 = _state_r_c_cat_T_69 | _state_r_c_cat_T_66; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_71 = _state_r_c_cat_T_61 | _state_r_c_cat_T_70; // @[package.scala:81:59] wire _state_r_c_cat_T_72 = _state_r_c_cat_T_54 | _state_r_c_cat_T_71; // @[Consts.scala:87:44, :90:{59,76}] wire _state_r_c_cat_T_75 = _state_r_c_cat_T_73 | _state_r_c_cat_T_74; // @[Consts.scala:90:{32,42,49}] wire _state_r_c_cat_T_77 = _state_r_c_cat_T_75 | _state_r_c_cat_T_76; // @[Consts.scala:90:{42,59,66}] wire _state_r_c_cat_T_82 = _state_r_c_cat_T_78 | _state_r_c_cat_T_79; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_83 = _state_r_c_cat_T_82 | _state_r_c_cat_T_80; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_84 = _state_r_c_cat_T_83 | _state_r_c_cat_T_81; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_90 = _state_r_c_cat_T_85 | _state_r_c_cat_T_86; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_91 = _state_r_c_cat_T_90 | _state_r_c_cat_T_87; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_92 = _state_r_c_cat_T_91 | _state_r_c_cat_T_88; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_93 = _state_r_c_cat_T_92 | _state_r_c_cat_T_89; // @[package.scala:16:47, :81:59] wire _state_r_c_cat_T_94 = _state_r_c_cat_T_84 | _state_r_c_cat_T_93; // @[package.scala:81:59] wire _state_r_c_cat_T_95 = _state_r_c_cat_T_77 | _state_r_c_cat_T_94; // @[Consts.scala:87:44, :90:{59,76}] wire _state_r_c_cat_T_97 = _state_r_c_cat_T_95 | _state_r_c_cat_T_96; // @[Consts.scala:90:76, :91:{47,54}] wire _state_r_c_cat_T_99 = _state_r_c_cat_T_97 | _state_r_c_cat_T_98; // @[Consts.scala:91:{47,64,71}] wire [1:0] state_r_c_1 = {_state_r_c_cat_T_72, _state_r_c_cat_T_99}; // @[Metadata.scala:29:18] wire [3:0] _state_r_T_59 = {state_r_c_1, io_req_old_meta_coh_state_0}; // @[Metadata.scala:29:18, :58:19] wire _state_r_T_84 = _state_r_T_59 == 4'hC; // @[Misc.scala:49:20] wire [1:0] _state_r_T_86 = {1'h0, _state_r_T_84}; // @[Misc.scala:35:36, :49:20] wire _state_r_T_87 = _state_r_T_59 == 4'hD; // @[Misc.scala:49:20] wire [1:0] _state_r_T_89 = _state_r_T_87 ? 2'h2 : _state_r_T_86; // @[Misc.scala:35:36, :49:20] wire _state_r_T_90 = _state_r_T_59 == 4'h4; // @[Misc.scala:49:20] wire [1:0] _state_r_T_92 = _state_r_T_90 ? 2'h1 : _state_r_T_89; // @[Misc.scala:35:36, :49:20] wire _state_r_T_93 = _state_r_T_59 == 4'h5; // @[Misc.scala:49:20] wire [1:0] _state_r_T_95 = _state_r_T_93 ? 
2'h2 : _state_r_T_92; // @[Misc.scala:35:36, :49:20] wire _state_r_T_96 = _state_r_T_59 == 4'h0; // @[Misc.scala:49:20] wire [1:0] _state_r_T_98 = _state_r_T_96 ? 2'h0 : _state_r_T_95; // @[Misc.scala:35:36, :49:20] wire _state_r_T_99 = _state_r_T_59 == 4'hE; // @[Misc.scala:49:20] wire _state_r_T_100 = _state_r_T_99; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_101 = _state_r_T_99 ? 2'h3 : _state_r_T_98; // @[Misc.scala:35:36, :49:20] wire _state_r_T_102 = &_state_r_T_59; // @[Misc.scala:49:20] wire _state_r_T_103 = _state_r_T_102 | _state_r_T_100; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_104 = _state_r_T_102 ? 2'h3 : _state_r_T_101; // @[Misc.scala:35:36, :49:20] wire _state_r_T_105 = _state_r_T_59 == 4'h6; // @[Misc.scala:49:20] wire _state_r_T_106 = _state_r_T_105 | _state_r_T_103; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_107 = _state_r_T_105 ? 2'h2 : _state_r_T_104; // @[Misc.scala:35:36, :49:20] wire _state_r_T_108 = _state_r_T_59 == 4'h7; // @[Misc.scala:49:20] wire _state_r_T_109 = _state_r_T_108 | _state_r_T_106; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_110 = _state_r_T_108 ? 2'h3 : _state_r_T_107; // @[Misc.scala:35:36, :49:20] wire _state_r_T_111 = _state_r_T_59 == 4'h1; // @[Misc.scala:49:20] wire _state_r_T_112 = _state_r_T_111 | _state_r_T_109; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_113 = _state_r_T_111 ? 2'h1 : _state_r_T_110; // @[Misc.scala:35:36, :49:20] wire _state_r_T_114 = _state_r_T_59 == 4'h2; // @[Misc.scala:49:20] wire _state_r_T_115 = _state_r_T_114 | _state_r_T_112; // @[Misc.scala:35:9, :49:20] wire [1:0] _state_r_T_116 = _state_r_T_114 ? 2'h2 : _state_r_T_113; // @[Misc.scala:35:36, :49:20] wire _state_r_T_117 = _state_r_T_59 == 4'h3; // @[Misc.scala:49:20] wire state_is_hit_1 = _state_r_T_117 | _state_r_T_115; // @[Misc.scala:35:9, :49:20] wire [1:0] state_r_2_1 = _state_r_T_117 ? 2'h3 : _state_r_T_116; // @[Misc.scala:35:36, :49:20] wire [1:0] state_coh_on_hit_1_state = state_r_2_1; // @[Misc.scala:35:36] wire _state_T_39 = _state_T_37 | _state_T_38; // @[Consts.scala:90:{32,42,49}] wire _state_T_41 = _state_T_39 | _state_T_40; // @[Consts.scala:90:{42,59,66}] wire _state_T_46 = _state_T_42 | _state_T_43; // @[package.scala:16:47, :81:59] wire _state_T_47 = _state_T_46 | _state_T_44; // @[package.scala:16:47, :81:59] wire _state_T_48 = _state_T_47 | _state_T_45; // @[package.scala:16:47, :81:59] wire _state_T_54 = _state_T_49 | _state_T_50; // @[package.scala:16:47, :81:59] wire _state_T_55 = _state_T_54 | _state_T_51; // @[package.scala:16:47, :81:59] wire _state_T_56 = _state_T_55 | _state_T_52; // @[package.scala:16:47, :81:59] wire _state_T_57 = _state_T_56 | _state_T_53; // @[package.scala:16:47, :81:59] wire _state_T_58 = _state_T_48 | _state_T_57; // @[package.scala:81:59] wire _state_T_59 = _state_T_41 | _state_T_58; // @[Consts.scala:87:44, :90:{59,76}] wire _state_T_61 = ~_state_T_60; // @[mshrs.scala:201:15] wire _state_T_62 = ~_state_T_59; // @[Consts.scala:90:76]
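The io_resp_bits_data mux chain above implements the usual subword load path: addr bit 2 selects the upper or lower 32-bit half of the refill word, addr bit 1 a 16-bit half of that, addr bit 0 a byte of that, and at each step the slice is sign-extended when the access size matches and the load is signed. A simplified plain-Scala model of that behaviour follows (illustrative only; the name extractLoadData and the single flat shift are not the generated logic, which works stage by stage):

// size encoding: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword
def extractLoadData(data: Long, addr: Int, size: Int, signed: Boolean): Long = {
  val width = 8 << size
  if (width >= 64) data
  else {
    val shift = (addr & 7) * 8               // byte offset within the 64-bit word
    val mask  = (1L << width) - 1
    val slice = (data >>> shift) & mask      // zero-extended subword
    val neg   = ((slice >> (width - 1)) & 1L) == 1L
    if (signed && neg) slice | ~mask else slice // sign-extend when requested
  }
}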
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
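Before the generated output below, a brief illustration may help: the ToAsyncBundle / FromAsyncBundle helpers defined above are the usual way the source and sink halves of the queue get wired up from two different clock domains. The following is a minimal Chisel sketch of that usage; the 8-bit payload, module names, and port names are made up for the example and do not come from the files above, and the parent that owns both clocks is assumed to place each wrapper under its own clock with withClockAndReset.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.{AsyncBundle, AsyncQueueParams, ToAsyncBundle, FromAsyncBundle}

// Producer-side wrapper: lives entirely in the source clock domain.
class AsyncCrossingSourceExample extends Module {
  val io = IO(new Bundle {
    val in    = Flipped(Decoupled(UInt(8.W)))   // data produced in this domain
    val async = new AsyncBundle(UInt(8.W), AsyncQueueParams(depth = 8))
  })
  // ToAsyncBundle instantiates an AsyncQueueSource and returns its async side.
  io.async <> ToAsyncBundle(io.in, AsyncQueueParams(depth = 8))
}

// Consumer-side wrapper: lives entirely in the sink clock domain.
class AsyncCrossingSinkExample extends Module {
  val io = IO(new Bundle {
    val async = Flipped(new AsyncBundle(UInt(8.W), AsyncQueueParams(depth = 8)))
    val out   = Decoupled(UInt(8.W))            // data consumed in this domain
  })
  // FromAsyncBundle instantiates an AsyncQueueSink and returns its Decoupled output.
  io.out <> FromAsyncBundle(io.async)
}

The two async ports are then connected to each other at the level that owns both clock domains, which is what the AsyncQueue wrapper class above does internally with sink.io.async <> source.io.async.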
module AsyncQueueSource_TLBundleA_a9d32s1k1z2u( // @[AsyncQueue.scala:70:7] input clock, // @[AsyncQueue.scala:70:7] input reset, // @[AsyncQueue.scala:70:7] output io_enq_ready, // @[AsyncQueue.scala:73:14] input io_enq_valid, // @[AsyncQueue.scala:73:14] input [2:0] io_enq_bits_opcode, // @[AsyncQueue.scala:73:14] input [8:0] io_enq_bits_address, // @[AsyncQueue.scala:73:14] input [31:0] io_enq_bits_data, // @[AsyncQueue.scala:73:14] output [2:0] io_async_mem_0_opcode, // @[AsyncQueue.scala:73:14] output [8:0] io_async_mem_0_address, // @[AsyncQueue.scala:73:14] output [31:0] io_async_mem_0_data, // @[AsyncQueue.scala:73:14] input io_async_ridx, // @[AsyncQueue.scala:73:14] output io_async_widx, // @[AsyncQueue.scala:73:14] input io_async_safe_ridx_valid, // @[AsyncQueue.scala:73:14] output io_async_safe_widx_valid, // @[AsyncQueue.scala:73:14] output io_async_safe_source_reset_n, // @[AsyncQueue.scala:73:14] input io_async_safe_sink_reset_n // @[AsyncQueue.scala:73:14] ); wire _sink_extend_io_out; // @[AsyncQueue.scala:105:30] wire _source_valid_0_io_out; // @[AsyncQueue.scala:102:32] wire io_enq_valid_0 = io_enq_valid; // @[AsyncQueue.scala:70:7] wire [2:0] io_enq_bits_opcode_0 = io_enq_bits_opcode; // @[AsyncQueue.scala:70:7] wire [8:0] io_enq_bits_address_0 = io_enq_bits_address; // @[AsyncQueue.scala:70:7] wire [31:0] io_enq_bits_data_0 = io_enq_bits_data; // @[AsyncQueue.scala:70:7] wire io_async_ridx_0 = io_async_ridx; // @[AsyncQueue.scala:70:7] wire io_async_safe_ridx_valid_0 = io_async_safe_ridx_valid; // @[AsyncQueue.scala:70:7] wire io_async_safe_sink_reset_n_0 = io_async_safe_sink_reset_n; // @[AsyncQueue.scala:70:7] wire _widx_T = reset; // @[AsyncQueue.scala:83:30] wire _ready_reg_T = reset; // @[AsyncQueue.scala:90:35] wire _widx_reg_T = reset; // @[AsyncQueue.scala:93:34] wire _source_valid_0_reset_T = reset; // @[AsyncQueue.scala:107:36] wire _source_valid_1_reset_T = reset; // @[AsyncQueue.scala:108:36] wire _sink_extend_reset_T = reset; // @[AsyncQueue.scala:109:36] wire _sink_valid_reset_T = reset; // @[AsyncQueue.scala:110:35] wire _io_async_safe_source_reset_n_T = reset; // @[AsyncQueue.scala:123:34] wire [3:0] io_enq_bits_mask = 4'hF; // @[AsyncQueue.scala:70:7] wire [3:0] io_async_mem_0_mask = 4'hF; // @[AsyncQueue.scala:70:7] wire io_enq_bits_source = 1'h0; // @[AsyncQueue.scala:70:7] wire io_enq_bits_corrupt = 1'h0; // @[AsyncQueue.scala:70:7] wire io_async_mem_0_source = 1'h0; // @[AsyncQueue.scala:70:7] wire io_async_mem_0_corrupt = 1'h0; // @[AsyncQueue.scala:70:7] wire _widx_T_3 = 1'h0; // @[AsyncQueue.scala:54:32] wire [1:0] io_enq_bits_size = 2'h2; // @[AsyncQueue.scala:70:7] wire [1:0] io_async_mem_0_size = 2'h2; // @[AsyncQueue.scala:70:7] wire [2:0] io_enq_bits_param = 3'h0; // @[AsyncQueue.scala:70:7] wire [2:0] io_async_mem_0_param = 3'h0; // @[AsyncQueue.scala:70:7] wire _io_enq_ready_T; // @[AsyncQueue.scala:91:29] wire _io_async_safe_source_reset_n_T_1; // @[AsyncQueue.scala:123:27] wire io_enq_ready_0; // @[AsyncQueue.scala:70:7] wire [2:0] io_async_mem_0_opcode_0; // @[AsyncQueue.scala:70:7] wire [8:0] io_async_mem_0_address_0; // @[AsyncQueue.scala:70:7] wire [31:0] io_async_mem_0_data_0; // @[AsyncQueue.scala:70:7] wire io_async_safe_widx_valid_0; // @[AsyncQueue.scala:70:7] wire io_async_safe_source_reset_n_0; // @[AsyncQueue.scala:70:7] wire io_async_widx_0; // @[AsyncQueue.scala:70:7] wire sink_ready; // @[AsyncQueue.scala:81:28] reg [2:0] mem_0_opcode; // @[AsyncQueue.scala:82:16] assign io_async_mem_0_opcode_0 = mem_0_opcode; // 
@[AsyncQueue.scala:70:7, :82:16] reg [8:0] mem_0_address; // @[AsyncQueue.scala:82:16] assign io_async_mem_0_address_0 = mem_0_address; // @[AsyncQueue.scala:70:7, :82:16] reg [31:0] mem_0_data; // @[AsyncQueue.scala:82:16] assign io_async_mem_0_data_0 = mem_0_data; // @[AsyncQueue.scala:70:7, :82:16] wire _widx_T_1 = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35] wire _widx_T_2 = ~sink_ready; // @[AsyncQueue.scala:81:28, :83:77] wire _widx_incremented_T_2; // @[AsyncQueue.scala:53:23] wire widx_incremented; // @[AsyncQueue.scala:51:27] wire widx = widx_incremented; // @[AsyncQueue.scala:51:27, :54:17] reg widx_widx_bin; // @[AsyncQueue.scala:52:25] wire [1:0] _widx_incremented_T = {1'h0, widx_widx_bin} + {1'h0, _widx_T_1}; // @[Decoupled.scala:51:35] wire _widx_incremented_T_1 = _widx_incremented_T[0]; // @[AsyncQueue.scala:53:43] assign _widx_incremented_T_2 = ~_widx_T_2 & _widx_incremented_T_1; // @[AsyncQueue.scala:53:{23,43}, :83:77] assign widx_incremented = _widx_incremented_T_2; // @[AsyncQueue.scala:51:27, :53:23] wire ridx; // @[ShiftReg.scala:48:24] wire _ready_T = ~ridx; // @[ShiftReg.scala:48:24] wire _ready_T_1 = widx != _ready_T; // @[AsyncQueue.scala:54:17, :85:{34,44}] wire ready = sink_ready & _ready_T_1; // @[AsyncQueue.scala:81:28, :85:{26,34}] reg ready_reg; // @[AsyncQueue.scala:90:56] assign _io_enq_ready_T = ready_reg & sink_ready; // @[AsyncQueue.scala:81:28, :90:56, :91:29] assign io_enq_ready_0 = _io_enq_ready_T; // @[AsyncQueue.scala:70:7, :91:29] reg widx_gray; // @[AsyncQueue.scala:93:55] assign io_async_widx_0 = widx_gray; // @[AsyncQueue.scala:70:7, :93:55] wire _source_valid_0_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46] wire _source_valid_0_reset_T_2 = _source_valid_0_reset_T | _source_valid_0_reset_T_1; // @[AsyncQueue.scala:107:{36,43,46}] wire _source_valid_0_reset_T_3 = _source_valid_0_reset_T_2; // @[AsyncQueue.scala:107:{43,65}] wire _source_valid_1_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46, :108:46] wire _source_valid_1_reset_T_2 = _source_valid_1_reset_T | _source_valid_1_reset_T_1; // @[AsyncQueue.scala:108:{36,43,46}] wire _source_valid_1_reset_T_3 = _source_valid_1_reset_T_2; // @[AsyncQueue.scala:108:{43,65}] wire _sink_extend_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46, :109:46] wire _sink_extend_reset_T_2 = _sink_extend_reset_T | _sink_extend_reset_T_1; // @[AsyncQueue.scala:109:{36,43,46}] wire _sink_extend_reset_T_3 = _sink_extend_reset_T_2; // @[AsyncQueue.scala:109:{43,65}] assign _io_async_safe_source_reset_n_T_1 = ~_io_async_safe_source_reset_n_T; // @[AsyncQueue.scala:123:{27,34}] assign io_async_safe_source_reset_n_0 = _io_async_safe_source_reset_n_T_1; // @[AsyncQueue.scala:70:7, :123:27] always @(posedge clock) begin // @[AsyncQueue.scala:70:7] if (_widx_T_1) begin // @[Decoupled.scala:51:35] mem_0_opcode <= io_enq_bits_opcode_0; // @[AsyncQueue.scala:70:7, :82:16] mem_0_address <= io_enq_bits_address_0; // @[AsyncQueue.scala:70:7, :82:16] mem_0_data <= io_enq_bits_data_0; // @[AsyncQueue.scala:70:7, :82:16] end always @(posedge) always @(posedge clock or posedge _widx_T) begin // @[AsyncQueue.scala:70:7, :83:30] if (_widx_T) // @[AsyncQueue.scala:70:7, :83:30] widx_widx_bin <= 1'h0; // @[AsyncQueue.scala:52:25] else // @[AsyncQueue.scala:70:7] widx_widx_bin <= widx_incremented; // @[AsyncQueue.scala:51:27, :52:25] always @(posedge, posedge) always @(posedge clock or posedge _ready_reg_T) begin // 
@[AsyncQueue.scala:70:7, :90:35] if (_ready_reg_T) // @[AsyncQueue.scala:70:7, :90:35] ready_reg <= 1'h0; // @[AsyncQueue.scala:90:56] else // @[AsyncQueue.scala:70:7] ready_reg <= ready; // @[AsyncQueue.scala:85:26, :90:56] end // always @(posedge, posedge) always @(posedge clock or posedge _widx_reg_T) begin // @[AsyncQueue.scala:70:7, :93:34] if (_widx_reg_T) // @[AsyncQueue.scala:70:7, :93:34] widx_gray <= 1'h0; // @[AsyncQueue.scala:93:55] else // @[AsyncQueue.scala:70:7] widx_gray <= widx; // @[AsyncQueue.scala:54:17, :93:55] end // always @(posedge, posedge)
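The asynchronously reset registers in the module above (widx_widx_bin, ready_reg and widx_gray, each written inside an always @(posedge clock or posedge ...) block) are what CIRCT emits for Chisel RegNext registers whose implicit reset has been rebound to an asynchronous reset, as AsyncQueueSource does with withReset(reset.asAsyncReset)(RegNext(...)). A minimal Chisel sketch of that pattern, with hypothetical module and signal names, is:

// Minimal sketch (hypothetical names): an async-reset RegNext, the pattern behind
// ready_reg and widx_gray above.
import chisel3._

class AsyncResetRegSketch extends Module {
  val io = IO(new Bundle {
    val d = Input(Bool())
    val q = Output(Bool())
  })
  // Rebinding the implicit reset as asynchronous makes CIRCT emit an
  // "always @(posedge clock or posedge reset)" block for this register.
  io.q := withReset(reset.asAsyncReset)(RegNext(io.d, false.B))
}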
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
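The AsyncQueue class at the end of the file just stitches an AsyncQueueSource and an AsyncQueueSink together across CrossingIO. As a hedged usage sketch (the wrapper module and its port names below are assumptions, not part of the file above), a crossing for 8-bit words could look like:

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

// Hedged sketch: an AsyncQueue carrying 8-bit words between two externally supplied
// clock/reset domains. Everything except AsyncQueue/AsyncQueueParams is an assumed name.
class ByteCrossing extends RawModule {
  val enq_clock = IO(Input(Clock()))
  val enq_reset = IO(Input(Bool()))
  val deq_clock = IO(Input(Clock()))
  val deq_reset = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(UInt(8.W))))
  val deq = IO(Decoupled(UInt(8.W)))

  // AsyncQueue places all of its state under the explicit enq/deq clocks; the
  // implicit clock/reset here are only bound to satisfy Module elaboration.
  val q = withClockAndReset(enq_clock, enq_reset) {
    Module(new AsyncQueue(UInt(8.W), AsyncQueueParams(depth = 8, sync = 3)))
  }
  q.io.enq_clock := enq_clock
  q.io.enq_reset := enq_reset
  q.io.deq_clock := deq_clock
  q.io.deq_reset := deq_reset
  q.io.enq <> enq
  deq <> q.io.deq
}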
module AsyncValidSync_72( // @[AsyncQueue.scala:58:7] output io_out, // @[AsyncQueue.scala:59:14] input clock, // @[AsyncQueue.scala:63:17] input reset // @[AsyncQueue.scala:64:17] ); wire io_in = 1'h1; // @[ShiftReg.scala:45:23] wire _io_out_WIRE; // @[ShiftReg.scala:48:24] wire io_out_0; // @[AsyncQueue.scala:58:7] assign io_out_0 = _io_out_WIRE; // @[ShiftReg.scala:48:24] AsyncResetSynchronizerShiftReg_w1_d3_i0_85 io_out_source_valid_0 ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (reset), .io_q (_io_out_WIRE) ); // @[ShiftReg.scala:45:23] assign io_out = io_out_0; // @[AsyncQueue.scala:58:7] endmodule
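AsyncValidSync_72 above is one elaborated instance of the AsyncValidSync class from the prompt: io_in is tied off to 1'h1 and the output comes from a 3-deep asynchronously reset synchronizer, so io_out rises a few cycles after reset deasserts. As a hedged sketch, a single instance like this could be elaborated on its own with the CIRCT-based ChiselStage (the entry-point name and build setup are assumptions):

// Hedged sketch: stand-alone elaboration of AsyncValidSync to SystemVerilog.
// Assumes the freechips.rocketchip.util definitions from the prompt are on the classpath.
import circt.stage.ChiselStage
import freechips.rocketchip.util.AsyncValidSync

object EmitAsyncValidSync extends App {
  println(ChiselStage.emitSystemVerilog(new AsyncValidSync(sync = 3, desc = "source_valid_0")))
}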
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
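The lowest-free-entry selection that appears twice in the file above, once for freeWriteIdOHRaw and once for freeOH in ReservableListBuffer, computes ~(leftOR(~used) << 1) & ~used, i.e. a one-hot of the least-significant cleared bit of 'used'. A small pure-Scala model (illustrative only, not the Chisel hardware) walks through one 8-entry case:

// Pure-Scala model of freeOH := ~(leftOR(~used) << 1) & ~used (illustrative names).
object FreeSlotModel extends App {
  val width = 8
  val mask  = (1 << width) - 1
  // leftOR: bit i of the result is the OR of bits (i downto 0) of x.
  def leftOR(x: Int): Int = {
    var seen = 0
    var acc  = 0
    for (i <- 0 until width) {
      seen |= (x >> i) & 1
      acc  |= seen << i
    }
    acc
  }
  val used   = Integer.parseInt("01101011", 2) // entries 0,1,3,5,6 busy; 2,4,7 free
  val free   = ~used & mask
  val freeOH = ~(leftOR(free) << 1) & free & mask
  println(freeOH.toBinaryString) // prints "100": entry 2, the lowest free slot
}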
module dataMems_108( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
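dataMems_108 above wraps a generated SRAM macro with one read port and one write port, which is how the Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } memories in ReservableListBuffer come out. The 5-bit address and 67-bit data are consistent with 32 entries holding a 64-bit beat plus a 2-bit resp and a 1-bit last with no extra user fields, though those exact parameters are an inference. A hedged Chisel sketch of a memory with the same port shape (all names illustrative):

import chisel3._

// Hedged sketch: a SyncReadMem with one read port and one write port, mirroring the
// R0/W0 interface of dataMems_108 (5-bit address, 67-bit data). Names are illustrative.
class DataMemSketch extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(5.W))
    val ren   = Input(Bool())
    val rdata = Output(UInt(67.W))
    val waddr = Input(UInt(5.W))
    val wen   = Input(Bool())
    val wdata = Input(UInt(67.W))
  })
  val mem = SyncReadMem(32, UInt(67.W))
  io.rdata := mem.read(io.raddr, io.ren)
  when(io.wen) { mem.write(io.waddr, io.wdata) }
}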
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address === address, "'A' channel address changed within multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address === address, "'B' channel address changed within multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal :=
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
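// Tracker state update for the A=>D source-ID checker (describing the code around this point):
// `inflight` holds one bit per source ID, set when the first beat of an A-channel request fires
// and cleared when the first beat of the matching D-channel response fires (ReleaseAck is
// excluded, since it answers the C channel). The opcode/size shadow state updated below uses the
// same per-source indexing, but stores each entry as (value << 1) | 1 so that an all-zero lane
// unambiguously means "no request outstanding" rather than a legal opcode or size of zero.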
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
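// Variable-amount rotate: pad the rotate amount to log2(width) bits and, for each bit i that is
// set, apply a fixed rotate by 2^i using the constant-amount rotateRight above. The foldLeft
// builds a log2(width)-stage mux cascade (a simple barrel rotator).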
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path from the point of serialization and holds at most read-only permissions. * (N)one: the agent holds no permissions on the block. * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permissions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
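// Illustrative note (added for clarity; not part of the upstream Parameters.scala):
// for the second example above, mask=0xf0f marks address bits 0-3 and 8-11 as "don't care",
// so contains(x) = ((x ^ base) & ~mask) == 0 compares only the remaining bits, e.g.:
//   AddressSet(0x1000, 0xf0f).contains(BigInt(0x1104)) // true:  (0x1104 ^ 0x1000) & ~0xf0f == 0
//   AddressSet(0x1000, 0xf0f).contains(BigInt(0x1110)) // false: bit 4 differs and is not masked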
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
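The edge helpers defined in Edges.scala above are normally called from a client's module implementation, where a diplomatic client node supplies both the TLBundle port and the matching TLEdgeOut. The fragment below is a minimal illustrative sketch of that pattern, not one of the input files: the node handle, the wantRead signal, and the literal address and transfer size are assumptions made only for this example.

// Minimal sketch (assumed context: inside a LazyModuleImp whose TLClientNode is `node`).
val (tl, edge) = node.out.head                              // TLBundle port and its TLEdgeOut
val (legal, get) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
tl.a.valid := wantRead && legal                             // only drive A if some manager supports Get
tl.a.bits  := get
tl.d.ready := true.B                                        // always accept D-channel beats
val dataValid = tl.d.valid && edge.hasData(tl.d.bits) && !tl.d.bits.corrupt
val readData  = tl.d.bits.data                              // one beat of response data when dataValid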
module TLMonitor_26( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [27:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [6:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [27:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [6:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire 
_c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] 
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_39 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_43 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_45 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_49 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_51 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_57 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // 
@[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [27:0] _c_first_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_first_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_first_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_first_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_set_wo_ready_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_set_wo_ready_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_opcodes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_opcodes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_sizes_set_interm_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_sizes_set_interm_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_opcodes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_opcodes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_sizes_set_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_sizes_set_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_probe_ack_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_probe_ack_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _c_probe_ack_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _c_probe_ack_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _same_cycle_resp_WIRE_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _same_cycle_resp_WIRE_1_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _same_cycle_resp_WIRE_2_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _same_cycle_resp_WIRE_3_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [27:0] _same_cycle_resp_WIRE_4_bits_address = 28'h0; // @[Bundles.scala:265:74] wire [27:0] _same_cycle_resp_WIRE_5_bits_address = 28'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_first_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_first_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_wo_ready_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_set_wo_ready_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] 
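// Note: the `_c_*` and `_same_cycle_resp_*` helper wires in this block are all tied off to constant
// zero. This edge evidently carries no TileLink C-channel traffic, so the monitor's C-side
// bookkeeping (c_set, c_opcodes_set, c_sizes_set) collapses to constants and only the A and D
// channels are actively tracked.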
wire [6:0] _c_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_interm_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_interm_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_opcodes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_opcodes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_sizes_set_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_sizes_set_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _c_probe_ack_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _c_probe_ack_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_1_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_2_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_3_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [6:0] _same_cycle_resp_WIRE_4_bits_source = 7'h0; // @[Bundles.scala:265:74] wire [6:0] _same_cycle_resp_WIRE_5_bits_source = 7'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [1026:0] _c_opcodes_set_T_1 = 1027'h0; // @[Monitor.scala:767:54] wire [1026:0] _c_sizes_set_T_1 = 1027'h0; // @[Monitor.scala:768:52] wire [9:0] _c_opcodes_set_T = 10'h0; // @[Monitor.scala:767:79] wire [9:0] _c_sizes_set_T = 10'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire 
[3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [127:0] _c_set_wo_ready_T = 128'h1; // @[OneHot.scala:58:35] wire [127:0] _c_set_T = 128'h1; // @[OneHot.scala:58:35] wire [259:0] c_opcodes_set = 260'h0; // @[Monitor.scala:740:34] wire [259:0] c_sizes_set = 260'h0; // @[Monitor.scala:741:34] wire [64:0] c_set = 65'h0; // @[Monitor.scala:738:34] wire [64:0] c_set_wo_ready = 65'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [6:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] 
_uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_4 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_5 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [6:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_1 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_7 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_13 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_19 = io_in_a_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire 
[1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 7'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 7'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire _source_ok_T_27 = io_in_a_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_7 = _source_ok_T_27; // @[Parameters.scala:1138:31] wire _source_ok_T_28 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_29 = _source_ok_T_28 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_30 = _source_ok_T_29 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_31 = _source_ok_T_30 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_32 = _source_ok_T_31 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_33 = _source_ok_T_32 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_33 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [27:0] _is_aligned_T = {22'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 28'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = 
mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire 
mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_4 = _uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_5 = _uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_10 = _uncommonBits_T_10[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_11 = _uncommonBits_T_11[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_16 = _uncommonBits_T_16[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_17 = _uncommonBits_T_17[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_22 = _uncommonBits_T_22[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_23 = _uncommonBits_T_23[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_28 = _uncommonBits_T_28[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_29 = _uncommonBits_T_29[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire 
[1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_34 = _uncommonBits_T_34[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_35 = _uncommonBits_T_35[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_34 = io_in_d_bits_source_0 == 7'h10; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_34; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] _source_ok_T_35 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_41 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_47 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire [4:0] _source_ok_T_53 = io_in_d_bits_source_0[6:2]; // @[Monitor.scala:36:7] wire _source_ok_T_36 = _source_ok_T_35 == 5'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_40 = _source_ok_T_38; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_40; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_42 = _source_ok_T_41 == 5'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_44 = _source_ok_T_42; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_46 = _source_ok_T_44; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_46; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_48 = _source_ok_T_47 == 5'h2; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_50 = _source_ok_T_48; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_52 = _source_ok_T_50; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_52; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_54 = _source_ok_T_53 == 5'h3; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_58 = _source_ok_T_56; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_58; // @[Parameters.scala:1138:31] wire _source_ok_T_59 = io_in_d_bits_source_0 == 7'h21; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_59; // @[Parameters.scala:1138:31] wire _source_ok_T_60 = io_in_d_bits_source_0 == 7'h20; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_60; // @[Parameters.scala:1138:31] wire _source_ok_T_61 = io_in_d_bits_source_0 == 7'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_7 = _source_ok_T_61; // @[Parameters.scala:1138:31] wire _source_ok_T_62 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_63 = _source_ok_T_62 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_64 = _source_ok_T_63 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_65 = _source_ok_T_64 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_66 = _source_ok_T_65 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, 
:1139:46] wire _source_ok_T_67 = _source_ok_T_66 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_67 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _T_982 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_982; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_982; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? 
a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [6:0] source; // @[Monitor.scala:390:22] reg [27:0] address; // @[Monitor.scala:391:22] wire _T_1055 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1055; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1055; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1055; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? 
d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [6:0] source_1; // @[Monitor.scala:541:22] reg [64:0] inflight; // @[Monitor.scala:614:27] reg [259:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [259:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? 
d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [64:0] a_set; // @[Monitor.scala:626:34] wire [64:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [259:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [259:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [9:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [9:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [9:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [9:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [9:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [9:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [9:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [9:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [9:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [259:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [259:0] _a_opcode_lookup_T_6 = {256'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [259:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [259:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [259:0] _a_size_lookup_T_6 = {256'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [259:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[259:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [127:0] _GEN_2 = 128'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [127:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_908 = _T_982 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_908 ? _a_set_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_908 ? 
_a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_908 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [9:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [9:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [9:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [1026:0] _a_opcodes_set_T_1 = {1023'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_908 ? _a_opcodes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [1026:0] _a_sizes_set_T_1 = {1023'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_908 ? _a_sizes_set_T_1[259:0] : 260'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [64:0] d_clr; // @[Monitor.scala:664:34] wire [64:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [259:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [259:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_954 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [127:0] _GEN_5 = 128'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [127:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_954 & ~d_release_ack ? _d_clr_wo_ready_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_923 = _T_1055 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_923 ? _d_clr_T[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_5 = 1039'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_923 ? _d_opcodes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [1038:0] _d_sizes_clr_T_5 = 1039'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_923 ? 
_d_sizes_clr_T_5[259:0] : 260'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [64:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [64:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [64:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [259:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [259:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [259:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [259:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [259:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [259:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [64:0] inflight_1; // @[Monitor.scala:726:35] wire [64:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [259:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [259:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [259:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [259:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? 
d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [259:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [259:0] _c_opcode_lookup_T_6 = {256'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [259:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[259:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [259:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [259:0] _c_size_lookup_T_6 = {256'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [259:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[259:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [64:0] d_clr_1; // @[Monitor.scala:774:34] wire [64:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [259:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [259:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1026 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1026 & d_release_ack_1 ? _d_clr_wo_ready_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire _T_1008 = _T_1055 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1008 ? _d_clr_T_1[64:0] : 65'h0; // @[OneHot.scala:58:35] wire [1038:0] _d_opcodes_clr_T_11 = 1039'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1008 ? _d_opcodes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [1038:0] _d_sizes_clr_T_11 = 1039'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1008 ? _d_sizes_clr_T_11[259:0] : 260'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 7'h0; // @[Monitor.scala:36:7, :795:113] wire [64:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [64:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [259:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [259:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [259:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [259:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
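// The registers declared above carry the monitor's in-flight bookkeeping: `inflight` is a 65-bit
// per-source bitmap, set when an A-channel request fires (`a_set`) and cleared when the matching
// D-channel response fires (`d_clr`), while `inflight_opcodes`/`inflight_sizes` record each
// outstanding request's opcode and size in 4-bit slots indexed by `{source, 2'h0}`. The `watchdog`
// counters increment every cycle (`_watchdog_T = watchdog + 1`) and back the monitor's
// response-timeout checks.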
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
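// Write-ID lifecycle, as implemented by the three `when` blocks below: the AXI AWID is captured
// into `writeIdMap` when `wOut` fires (so the eventual B response can carry the original ID), the
// ID is marked busy in `usedWriteIds` once the last TL beat of the Put is accepted
// (`edgeOut.done(wOut)`), and the slot is freed again when the B beat is accepted (`okB.fire`).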
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
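// Illustrative scenario (hypothetical, for clarity): suppose list 3 currently holds exactly one entry
// whose final beat is leaving through 'ioDataOut' this cycle (isEndOfList && isLastBeat), while
// 'ioReserve' is simultaneously reserving a new entry for the same list 3. Writing the new entry into
// 'next' at the old tail would chain it onto a list that is being torn down; instead the new entry
// must be installed as the head of a fresh list. The 'appendToAndDestroyList' term below detects
// exactly this case and steers the head/next writes accordingly.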
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
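// Note (descriptive only): 'dataMems' is one single-ported SyncReadMem per beat, all indexed by the
// same entry number, so an out-of-order burst of up to 'numBeats' beats is parked at a single reserved
// entry with one beat per bank. The one-hot 'responseCountOH' below enables exactly one bank's write
// port per response beat.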
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
module dataMems_374( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
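For reference, here is a minimal stand-alone sketch (not part of the generated design; the 32-entry depth and 67-bit width are simply read off the Verilog ports above, and the module name is hypothetical) of a Chisel module whose SyncReadMem has the same one-read-port, one-write-port geometry as the memory wrapped by dataMems_374:

import chisel3._

// Hypothetical single bank with the same shape as the extracted memory above:
// 32 entries (5-bit address) of 67-bit data, one synchronous read port and one write port.
class SingleBeatBank extends Module {
  val io = IO(new Bundle {
    val rAddr = Input(UInt(5.W))
    val rEn   = Input(Bool())
    val rData = Output(UInt(67.W))
    val wAddr = Input(UInt(5.W))
    val wEn   = Input(Bool())
    val wData = Input(UInt(67.W))
  })
  val mem = SyncReadMem(32, UInt(67.W))
  // Synchronous read: rData is valid one cycle after rEn is asserted, matching the R0 port above.
  io.rData := mem.read(io.rAddr, io.rEn)
  when(io.wEn) { mem.write(io.wAddr, io.wData) }
}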
Generate the Verilog code corresponding to the following Chisel files. File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. 
* Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode 
= UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new 
TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File TLSerdes.scala: package testchipip.serdes import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy._ import org.chipsalliance.cde.config._ import freechips.rocketchip.util._ import freechips.rocketchip.tilelink._ object TLSerdesser { // This should be the standard bundle type for TLSerdesser val STANDARD_TLBUNDLE_PARAMS = TLBundleParameters( addressBits=64, dataBits=64, sourceBits=8, sinkBits=8, sizeBits=8, echoFields=Nil, requestFields=Nil, responseFields=Nil, hasBCE=true) } class SerdesDebugIO extends Bundle { val ser_busy = Output(Bool()) val des_busy = Output(Bool()) } class TLSerdesser( val flitWidth: Int, clientPortParams: Option[TLMasterPortParameters], managerPortParams: Option[TLSlavePortParameters], val bundleParams: TLBundleParameters = TLSerdesser.STANDARD_TLBUNDLE_PARAMS, nameSuffix: Option[String] = None ) (implicit p: Parameters) extends LazyModule { require (clientPortParams.isDefined || managerPortParams.isDefined) val clientNode = clientPortParams.map { c => TLClientNode(Seq(c)) } val managerNode = managerPortParams.map { m => TLManagerNode(Seq(m)) } override lazy val desiredName = (Seq("TLSerdesser") ++ nameSuffix).mkString("_") lazy val module = new Impl class Impl extends LazyModuleImp(this) { val io = IO(new Bundle { val ser = Vec(5, new DecoupledFlitIO(flitWidth)) val debug = new SerdesDebugIO }) val client_tl = clientNode.map(_.out(0)._1).getOrElse(0.U.asTypeOf(new TLBundle(bundleParams))) val client_edge = clientNode.map(_.out(0)._2) val manager_tl = managerNode.map(_.in(0)._1).getOrElse(0.U.asTypeOf(new TLBundle(bundleParams))) val manager_edge = managerNode.map(_.in(0)._2) val clientParams = client_edge.map(_.bundle).getOrElse(bundleParams) val managerParams = manager_edge.map(_.bundle).getOrElse(bundleParams) val mergedParams = clientParams.union(managerParams).union(bundleParams) require(mergedParams.echoFields.isEmpty, "TLSerdesser does not support TileLink with echo fields") require(mergedParams.requestFields.isEmpty, "TLSerdesser does not support TileLink with request fields") require(mergedParams.responseFields.isEmpty, "TLSerdesser does not support TileLink with response fields") require(mergedParams == bundleParams, s"TLSerdesser is misconfigured, the combined inwards/outwards parameters cannot be serialized using the provided bundle params\n$mergedParams > $bundleParams") val out_channels = Seq( (manager_tl.e, manager_edge.map(e => Module(new 
TLEToBeat(e, mergedParams, nameSuffix)))), (client_tl.d, client_edge.map (e => Module(new TLDToBeat(e, mergedParams, nameSuffix)))), (manager_tl.c, manager_edge.map(e => Module(new TLCToBeat(e, mergedParams, nameSuffix)))), (client_tl.b, client_edge.map (e => Module(new TLBToBeat(e, mergedParams, nameSuffix)))), (manager_tl.a, manager_edge.map(e => Module(new TLAToBeat(e, mergedParams, nameSuffix)))) ) io.ser.map(_.out.valid := false.B) io.ser.map(_.out.bits := DontCare) val out_sers = out_channels.zipWithIndex.map { case ((c,b),i) => b.map { b => b.io.protocol <> c val ser = Module(new GenericSerializer(b.io.beat.bits.cloneType, flitWidth)).suggestName(s"ser_$i") ser.io.in <> b.io.beat io.ser(i).out <> ser.io.out ser }}.flatten io.debug.ser_busy := out_sers.map(_.io.busy).orR val in_channels = Seq( (client_tl.e, Module(new TLEFromBeat(mergedParams, nameSuffix))), (manager_tl.d, Module(new TLDFromBeat(mergedParams, nameSuffix))), (client_tl.c, Module(new TLCFromBeat(mergedParams, nameSuffix))), (manager_tl.b, Module(new TLBFromBeat(mergedParams, nameSuffix))), (client_tl.a, Module(new TLAFromBeat(mergedParams, nameSuffix))) ) val in_desers = in_channels.zipWithIndex.map { case ((c,b),i) => c <> b.io.protocol val des = Module(new GenericDeserializer(b.io.beat.bits.cloneType, flitWidth)).suggestName(s"des_$i") des.io.in <> io.ser(i).in b.io.beat <> des.io.out des } io.debug.des_busy := in_desers.map(_.io.busy).orR } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. 
Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usually produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package; all nodes are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a bunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a bunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined by the node itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a bunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extend from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a bunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined by the node itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a bunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a bunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extend from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]]s are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)`; `flexSet` for `e` or `f` will be `Set(e, f)`. */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc. operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indices. As connections are being made, some may be "star" * connections which need to be resolved in some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that we can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
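The following is a minimal, illustrative sketch (not one of the input files above; the object name DangleSketch is assumed purely for illustration) showing the HalfEdge ordering and the deferred Dangle payload described in MixedNode.scala:

import org.chipsalliance.diplomacy.nodes.{Dangle, HalfEdge}

object DangleSketch extends App {
  // HalfEdge is Ordered by (serial, index), so ports of the same node sort by port index.
  val first = HalfEdge(serial = 3, index = 0)
  val second = HalfEdge(serial = 3, index = 2)
  assert(first < second)

  // cloneDangles() produces Dangles with dataOpt = None; LazyModuleImp.instantiate
  // later fills in the hardware via d.copy(dataOpt = Some(io)).
  val pending = Dangle(source = first, sink = second, flipped = false, name = "nodeOut", dataOpt = None)
  assert(pending.dataOpt.isEmpty) // calling pending.data here would throw, since the Option is empty
}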
module TLSerdesser_SerialRAM( // @[TLSerdes.scala:39:9] input clock, // @[TLSerdes.scala:39:9] input reset, // @[TLSerdes.scala:39:9] output auto_manager_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_manager_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_manager_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_manager_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [3:0] auto_manager_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input auto_manager_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_manager_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_manager_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_manager_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_manager_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_manager_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_manager_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_manager_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_manager_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [3:0] auto_manager_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output auto_manager_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output [2:0] auto_manager_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_manager_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_manager_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_manager_in_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output io_ser_0_in_ready, // @[TLSerdes.scala:40:16] input io_ser_0_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_0_in_bits_flit, // @[TLSerdes.scala:40:16] input io_ser_0_out_ready, // @[TLSerdes.scala:40:16] output [31:0] io_ser_0_out_bits_flit, // @[TLSerdes.scala:40:16] output io_ser_1_in_ready, // @[TLSerdes.scala:40:16] input io_ser_1_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_1_in_bits_flit, // @[TLSerdes.scala:40:16] output io_ser_2_in_ready, // @[TLSerdes.scala:40:16] input io_ser_2_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_2_in_bits_flit, // @[TLSerdes.scala:40:16] input io_ser_2_out_ready, // @[TLSerdes.scala:40:16] output io_ser_2_out_valid, // @[TLSerdes.scala:40:16] output [31:0] io_ser_2_out_bits_flit, // @[TLSerdes.scala:40:16] output io_ser_3_in_ready, // @[TLSerdes.scala:40:16] input io_ser_3_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_3_in_bits_flit, // @[TLSerdes.scala:40:16] output io_ser_4_in_ready, // @[TLSerdes.scala:40:16] input io_ser_4_in_valid, // @[TLSerdes.scala:40:16] input [31:0] io_ser_4_in_bits_flit, // @[TLSerdes.scala:40:16] input io_ser_4_out_ready, // @[TLSerdes.scala:40:16] output io_ser_4_out_valid, // @[TLSerdes.scala:40:16] output [31:0] io_ser_4_out_bits_flit // @[TLSerdes.scala:40:16] ); wire _des_4_io_out_valid; // @[TLSerdes.scala:86:23] wire [85:0] _des_4_io_out_bits_payload; // @[TLSerdes.scala:86:23] wire _des_4_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_4_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _des_4_io_busy; // @[TLSerdes.scala:86:23] wire _des_3_io_out_valid; // @[TLSerdes.scala:86:23] wire [84:0] _des_3_io_out_bits_payload; // @[TLSerdes.scala:86:23] wire _des_3_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_3_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _des_3_io_busy; // @[TLSerdes.scala:86:23] wire _des_2_io_out_valid; // @[TLSerdes.scala:86:23] wire [85:0] _des_2_io_out_bits_payload; // 
@[TLSerdes.scala:86:23] wire _des_2_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_2_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _des_2_io_busy; // @[TLSerdes.scala:86:23] wire _des_1_io_out_valid; // @[TLSerdes.scala:86:23] wire [64:0] _des_1_io_out_bits_payload; // @[TLSerdes.scala:86:23] wire _des_1_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_1_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _des_0_io_out_valid; // @[TLSerdes.scala:86:23] wire [7:0] _des_0_io_out_bits_payload; // @[TLSerdes.scala:86:23] wire _des_0_io_out_bits_head; // @[TLSerdes.scala:86:23] wire _des_0_io_out_bits_tail; // @[TLSerdes.scala:86:23] wire _in_channels_4_2_io_beat_ready; // @[TLSerdes.scala:82:28] wire [7:0] _in_channels_3_2_io_protocol_bits_size; // @[TLSerdes.scala:81:28] wire [7:0] _in_channels_3_2_io_protocol_bits_source; // @[TLSerdes.scala:81:28] wire [63:0] _in_channels_3_2_io_protocol_bits_address; // @[TLSerdes.scala:81:28] wire _in_channels_3_2_io_beat_ready; // @[TLSerdes.scala:81:28] wire _in_channels_2_2_io_beat_ready; // @[TLSerdes.scala:80:28] wire [7:0] _in_channels_1_2_io_protocol_bits_size; // @[TLSerdes.scala:79:28] wire [7:0] _in_channels_1_2_io_protocol_bits_source; // @[TLSerdes.scala:79:28] wire [7:0] _in_channels_1_2_io_protocol_bits_sink; // @[TLSerdes.scala:79:28] wire _in_channels_1_2_io_beat_ready; // @[TLSerdes.scala:79:28] wire _in_channels_0_2_io_beat_ready; // @[TLSerdes.scala:78:28] wire _ser_4_io_in_ready; // @[TLSerdes.scala:69:23] wire _ser_4_io_busy; // @[TLSerdes.scala:69:23] wire _ser_2_io_in_ready; // @[TLSerdes.scala:69:23] wire _ser_0_io_in_ready; // @[TLSerdes.scala:69:23] wire _out_channels_4_2_io_beat_valid; // @[TLSerdes.scala:63:50] wire [85:0] _out_channels_4_2_io_beat_bits_payload; // @[TLSerdes.scala:63:50] wire _out_channels_4_2_io_beat_bits_head; // @[TLSerdes.scala:63:50] wire _out_channels_4_2_io_beat_bits_tail; // @[TLSerdes.scala:63:50] wire _out_channels_2_2_io_beat_bits_head; // @[TLSerdes.scala:61:50] wire _out_channels_0_2_io_beat_bits_head; // @[TLSerdes.scala:59:50] wire auto_manager_in_a_valid_0 = auto_manager_in_a_valid; // @[TLSerdes.scala:39:9] wire [2:0] auto_manager_in_a_bits_opcode_0 = auto_manager_in_a_bits_opcode; // @[TLSerdes.scala:39:9] wire [2:0] auto_manager_in_a_bits_param_0 = auto_manager_in_a_bits_param; // @[TLSerdes.scala:39:9] wire [3:0] auto_manager_in_a_bits_size_0 = auto_manager_in_a_bits_size; // @[TLSerdes.scala:39:9] wire auto_manager_in_a_bits_source_0 = auto_manager_in_a_bits_source; // @[TLSerdes.scala:39:9] wire [31:0] auto_manager_in_a_bits_address_0 = auto_manager_in_a_bits_address; // @[TLSerdes.scala:39:9] wire [7:0] auto_manager_in_a_bits_mask_0 = auto_manager_in_a_bits_mask; // @[TLSerdes.scala:39:9] wire [63:0] auto_manager_in_a_bits_data_0 = auto_manager_in_a_bits_data; // @[TLSerdes.scala:39:9] wire auto_manager_in_a_bits_corrupt_0 = auto_manager_in_a_bits_corrupt; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_ready_0 = auto_manager_in_d_ready; // @[TLSerdes.scala:39:9] wire io_ser_0_in_valid_0 = io_ser_0_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_0_in_bits_flit_0 = io_ser_0_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_0_out_ready_0 = io_ser_0_out_ready; // @[TLSerdes.scala:39:9] wire io_ser_1_in_valid_0 = io_ser_1_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_1_in_bits_flit_0 = io_ser_1_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_2_in_valid_0 = io_ser_2_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_2_in_bits_flit_0 = 
io_ser_2_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_2_out_ready_0 = io_ser_2_out_ready; // @[TLSerdes.scala:39:9] wire io_ser_3_in_valid_0 = io_ser_3_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_3_in_bits_flit_0 = io_ser_3_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_4_in_valid_0 = io_ser_4_in_valid; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_4_in_bits_flit_0 = io_ser_4_in_bits_flit; // @[TLSerdes.scala:39:9] wire io_ser_4_out_ready_0 = io_ser_4_out_ready; // @[TLSerdes.scala:39:9] wire [2:0] client_tl_b_bits_opcode = 3'h0; // @[TLSerdes.scala:45:71] wire [2:0] client_tl_d_bits_opcode = 3'h0; // @[TLSerdes.scala:45:71] wire [2:0] _out_channels_WIRE_bits_sink = 3'h0; // @[Bundles.scala:267:74] wire [2:0] out_channels_0_1_bits_sink = 3'h0; // @[Bundles.scala:267:61] wire [2:0] _out_channels_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _out_channels_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] out_channels_2_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] out_channels_2_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _in_channels_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:264:74] wire [1:0] client_tl_b_bits_param = 2'h0; // @[TLSerdes.scala:45:71] wire [1:0] client_tl_d_bits_param = 2'h0; // @[TLSerdes.scala:45:71] wire [1:0] _in_channels_WIRE_bits_param = 2'h0; // @[Bundles.scala:264:74] wire [3:0] _out_channels_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] out_channels_2_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _in_channels_WIRE_bits_size = 4'h0; // @[Bundles.scala:264:74] wire [7:0] client_tl_b_bits_size = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_b_bits_source = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_b_bits_mask = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_d_bits_size = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_d_bits_source = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_d_bits_sink = 8'h0; // @[TLSerdes.scala:45:71] wire [7:0] _in_channels_WIRE_bits_mask = 8'h0; // @[Bundles.scala:264:74] wire [63:0] client_tl_b_bits_address = 64'h0; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_b_bits_data = 64'h0; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_d_bits_data = 64'h0; // @[TLSerdes.scala:45:71] wire [63:0] _out_channels_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] out_channels_2_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _in_channels_WIRE_bits_data = 64'h0; // @[Bundles.scala:264:74] wire [31:0] io_ser_1_out_bits_flit = 32'h0; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_3_out_bits_flit = 32'h0; // @[TLSerdes.scala:39:9] wire [31:0] _out_channels_WIRE_1_bits_address = 32'h0; // @[Bundles.scala:265:74] wire [31:0] out_channels_2_1_bits_address = 32'h0; // @[Bundles.scala:265:61] wire [31:0] _in_channels_WIRE_bits_address = 32'h0; // @[Bundles.scala:264:74] wire io_ser_1_out_ready = 1'h1; // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50] wire io_ser_3_out_ready = 1'h1; // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50] wire out_channels_0_1_ready = 1'h1; // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50] wire out_channels_2_1_ready = 1'h1; // @[TLSerdes.scala:39:9, :40:16, :59:50, :61:50] wire io_ser_0_out_valid = 1'h0; // @[TLSerdes.scala:39:9] wire io_ser_1_out_valid = 1'h0; // @[TLSerdes.scala:39:9] wire io_ser_3_out_valid = 1'h0; // @[TLSerdes.scala:39:9] wire managerNodeIn_a_ready; // @[MixedNode.scala:551:17] wire client_tl_a_ready = 1'h0; // @[TLSerdes.scala:45:71] 
wire client_tl_b_ready = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_b_valid = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_b_bits_corrupt = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_c_ready = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_d_ready = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_d_valid = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_d_bits_denied = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_d_bits_corrupt = 1'h0; // @[TLSerdes.scala:45:71] wire client_tl_e_ready = 1'h0; // @[TLSerdes.scala:45:71] wire _out_channels_WIRE_ready = 1'h0; // @[Bundles.scala:267:74] wire _out_channels_WIRE_valid = 1'h0; // @[Bundles.scala:267:74] wire out_channels_0_1_valid = 1'h0; // @[Bundles.scala:267:61] wire _out_channels_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:74] wire _out_channels_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:74] wire _out_channels_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _out_channels_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire out_channels_2_1_valid = 1'h0; // @[Bundles.scala:265:61] wire out_channels_2_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire out_channels_2_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _in_channels_WIRE_ready = 1'h0; // @[Bundles.scala:264:74] wire _in_channels_WIRE_valid = 1'h0; // @[Bundles.scala:264:74] wire _in_channels_WIRE_bits_source = 1'h0; // @[Bundles.scala:264:74] wire _in_channels_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:264:74] wire in_channels_3_1_ready = 1'h0; // @[Bundles.scala:264:61] wire managerNodeIn_a_valid = auto_manager_in_a_valid_0; // @[TLSerdes.scala:39:9] wire [2:0] managerNodeIn_a_bits_opcode = auto_manager_in_a_bits_opcode_0; // @[TLSerdes.scala:39:9] wire [2:0] managerNodeIn_a_bits_param = auto_manager_in_a_bits_param_0; // @[TLSerdes.scala:39:9] wire [3:0] managerNodeIn_a_bits_size = auto_manager_in_a_bits_size_0; // @[TLSerdes.scala:39:9] wire managerNodeIn_a_bits_source = auto_manager_in_a_bits_source_0; // @[TLSerdes.scala:39:9] wire [31:0] managerNodeIn_a_bits_address = auto_manager_in_a_bits_address_0; // @[TLSerdes.scala:39:9] wire [7:0] managerNodeIn_a_bits_mask = auto_manager_in_a_bits_mask_0; // @[TLSerdes.scala:39:9] wire [63:0] managerNodeIn_a_bits_data = auto_manager_in_a_bits_data_0; // @[TLSerdes.scala:39:9] wire managerNodeIn_a_bits_corrupt = auto_manager_in_a_bits_corrupt_0; // @[TLSerdes.scala:39:9] wire managerNodeIn_d_ready = auto_manager_in_d_ready_0; // @[TLSerdes.scala:39:9] wire managerNodeIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] managerNodeIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] managerNodeIn_d_bits_param; // @[MixedNode.scala:551:17] wire [3:0] managerNodeIn_d_bits_size; // @[MixedNode.scala:551:17] wire managerNodeIn_d_bits_source; // @[MixedNode.scala:551:17] wire [2:0] managerNodeIn_d_bits_sink; // @[MixedNode.scala:551:17] wire managerNodeIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] managerNodeIn_d_bits_data; // @[MixedNode.scala:551:17] wire managerNodeIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire _io_debug_ser_busy_T_1; // @[package.scala:81:59] wire _io_debug_des_busy_T_3; // @[package.scala:81:59] wire auto_manager_in_a_ready_0; // @[TLSerdes.scala:39:9] wire [2:0] auto_manager_in_d_bits_opcode_0; // @[TLSerdes.scala:39:9] wire [1:0] auto_manager_in_d_bits_param_0; // @[TLSerdes.scala:39:9] wire [3:0] auto_manager_in_d_bits_size_0; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_bits_source_0; // @[TLSerdes.scala:39:9] wire [2:0] 
auto_manager_in_d_bits_sink_0; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_bits_denied_0; // @[TLSerdes.scala:39:9] wire [63:0] auto_manager_in_d_bits_data_0; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_bits_corrupt_0; // @[TLSerdes.scala:39:9] wire auto_manager_in_d_valid_0; // @[TLSerdes.scala:39:9] wire io_ser_0_in_ready_0; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_0_out_bits_flit_0; // @[TLSerdes.scala:39:9] wire io_ser_1_in_ready_0; // @[TLSerdes.scala:39:9] wire io_ser_2_in_ready_0; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_2_out_bits_flit_0; // @[TLSerdes.scala:39:9] wire io_ser_2_out_valid_0; // @[TLSerdes.scala:39:9] wire io_ser_3_in_ready_0; // @[TLSerdes.scala:39:9] wire io_ser_4_in_ready_0; // @[TLSerdes.scala:39:9] wire [31:0] io_ser_4_out_bits_flit_0; // @[TLSerdes.scala:39:9] wire io_ser_4_out_valid_0; // @[TLSerdes.scala:39:9] wire io_debug_ser_busy; // @[TLSerdes.scala:39:9] wire io_debug_des_busy; // @[TLSerdes.scala:39:9] assign auto_manager_in_a_ready_0 = managerNodeIn_a_ready; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_valid_0 = managerNodeIn_d_valid; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_opcode_0 = managerNodeIn_d_bits_opcode; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_param_0 = managerNodeIn_d_bits_param; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_size_0 = managerNodeIn_d_bits_size; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_source_0 = managerNodeIn_d_bits_source; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_sink_0 = managerNodeIn_d_bits_sink; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_denied_0 = managerNodeIn_d_bits_denied; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_data_0 = managerNodeIn_d_bits_data; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_corrupt_0 = managerNodeIn_d_bits_corrupt; // @[TLSerdes.scala:39:9] wire [2:0] client_tl_a_bits_opcode; // @[TLSerdes.scala:45:71] wire [2:0] client_tl_a_bits_param; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_a_bits_size; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_a_bits_source; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_a_bits_address; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_a_bits_mask; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_a_bits_data; // @[TLSerdes.scala:45:71] wire client_tl_a_bits_corrupt; // @[TLSerdes.scala:45:71] wire client_tl_a_valid; // @[TLSerdes.scala:45:71] wire [2:0] client_tl_c_bits_opcode; // @[TLSerdes.scala:45:71] wire [2:0] client_tl_c_bits_param; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_c_bits_size; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_c_bits_source; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_c_bits_address; // @[TLSerdes.scala:45:71] wire [63:0] client_tl_c_bits_data; // @[TLSerdes.scala:45:71] wire client_tl_c_bits_corrupt; // @[TLSerdes.scala:45:71] wire client_tl_c_valid; // @[TLSerdes.scala:45:71] wire [7:0] client_tl_e_bits_sink; // @[TLSerdes.scala:45:71] wire client_tl_e_valid; // @[TLSerdes.scala:45:71] wire _io_debug_ser_busy_T; // @[package.scala:81:59] assign _io_debug_ser_busy_T_1 = _io_debug_ser_busy_T | _ser_4_io_busy; // @[TLSerdes.scala:69:23] assign io_debug_ser_busy = _io_debug_ser_busy_T_1; // @[TLSerdes.scala:39:9] wire [2:0] in_channels_3_1_bits_opcode; // @[Bundles.scala:264:61] wire [1:0] in_channels_3_1_bits_param; // @[Bundles.scala:264:61] wire [3:0] in_channels_3_1_bits_size; // @[Bundles.scala:264:61] wire in_channels_3_1_bits_source; // @[Bundles.scala:264:61] wire [31:0] 
in_channels_3_1_bits_address; // @[Bundles.scala:264:61] wire [7:0] in_channels_3_1_bits_mask; // @[Bundles.scala:264:61] wire [63:0] in_channels_3_1_bits_data; // @[Bundles.scala:264:61] wire in_channels_3_1_bits_corrupt; // @[Bundles.scala:264:61] wire in_channels_3_1_valid; // @[Bundles.scala:264:61] assign managerNodeIn_d_bits_size = _in_channels_1_2_io_protocol_bits_size[3:0]; // @[TLSerdes.scala:79:28, :85:9] assign managerNodeIn_d_bits_source = _in_channels_1_2_io_protocol_bits_source[0]; // @[TLSerdes.scala:79:28, :85:9] assign managerNodeIn_d_bits_sink = _in_channels_1_2_io_protocol_bits_sink[2:0]; // @[TLSerdes.scala:79:28, :85:9] assign in_channels_3_1_bits_size = _in_channels_3_2_io_protocol_bits_size[3:0]; // @[TLSerdes.scala:81:28, :85:9] assign in_channels_3_1_bits_source = _in_channels_3_2_io_protocol_bits_source[0]; // @[TLSerdes.scala:81:28, :85:9] assign in_channels_3_1_bits_address = _in_channels_3_2_io_protocol_bits_address[31:0]; // @[TLSerdes.scala:81:28, :85:9] wire _io_debug_des_busy_T; // @[package.scala:81:59] wire _io_debug_des_busy_T_1 = _io_debug_des_busy_T | _des_2_io_busy; // @[TLSerdes.scala:86:23] wire _io_debug_des_busy_T_2 = _io_debug_des_busy_T_1 | _des_3_io_busy; // @[TLSerdes.scala:86:23] assign _io_debug_des_busy_T_3 = _io_debug_des_busy_T_2 | _des_4_io_busy; // @[TLSerdes.scala:86:23] assign io_debug_des_busy = _io_debug_des_busy_T_3; // @[TLSerdes.scala:39:9] TLMonitor_63 monitor ( // @[Nodes.scala:27:25] .clock (clock), .reset (reset), .io_in_a_ready (managerNodeIn_a_ready), // @[MixedNode.scala:551:17] .io_in_a_valid (managerNodeIn_a_valid), // @[MixedNode.scala:551:17] .io_in_a_bits_opcode (managerNodeIn_a_bits_opcode), // @[MixedNode.scala:551:17] .io_in_a_bits_param (managerNodeIn_a_bits_param), // @[MixedNode.scala:551:17] .io_in_a_bits_size (managerNodeIn_a_bits_size), // @[MixedNode.scala:551:17] .io_in_a_bits_source (managerNodeIn_a_bits_source), // @[MixedNode.scala:551:17] .io_in_a_bits_address (managerNodeIn_a_bits_address), // @[MixedNode.scala:551:17] .io_in_a_bits_mask (managerNodeIn_a_bits_mask), // @[MixedNode.scala:551:17] .io_in_a_bits_data (managerNodeIn_a_bits_data), // @[MixedNode.scala:551:17] .io_in_a_bits_corrupt (managerNodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17] .io_in_d_ready (managerNodeIn_d_ready), // @[MixedNode.scala:551:17] .io_in_d_valid (managerNodeIn_d_valid), // @[MixedNode.scala:551:17] .io_in_d_bits_opcode (managerNodeIn_d_bits_opcode), // @[MixedNode.scala:551:17] .io_in_d_bits_param (managerNodeIn_d_bits_param), // @[MixedNode.scala:551:17] .io_in_d_bits_size (managerNodeIn_d_bits_size), // @[MixedNode.scala:551:17] .io_in_d_bits_source (managerNodeIn_d_bits_source), // @[MixedNode.scala:551:17] .io_in_d_bits_sink (managerNodeIn_d_bits_sink), // @[MixedNode.scala:551:17] .io_in_d_bits_denied (managerNodeIn_d_bits_denied), // @[MixedNode.scala:551:17] .io_in_d_bits_data (managerNodeIn_d_bits_data), // @[MixedNode.scala:551:17] .io_in_d_bits_corrupt (managerNodeIn_d_bits_corrupt) // @[MixedNode.scala:551:17] ); // @[Nodes.scala:27:25] TLEToBeat_SerialRAM_a64d64s8k8z8c out_channels_0_2 ( // @[TLSerdes.scala:59:50] .clock (clock), .reset (reset), .io_beat_ready (_ser_0_io_in_ready), // @[TLSerdes.scala:69:23] .io_beat_bits_head (_out_channels_0_2_io_beat_bits_head) ); // @[TLSerdes.scala:59:50] TLCToBeat_SerialRAM_a64d64s8k8z8c out_channels_2_2 ( // @[TLSerdes.scala:61:50] .clock (clock), .reset (reset), .io_beat_ready (_ser_2_io_in_ready), // @[TLSerdes.scala:69:23] .io_beat_bits_head 
(_out_channels_2_2_io_beat_bits_head) ); // @[TLSerdes.scala:61:50] TLAToBeat_SerialRAM_a64d64s8k8z8c out_channels_4_2 ( // @[TLSerdes.scala:63:50] .clock (clock), .reset (reset), .io_protocol_ready (managerNodeIn_a_ready), .io_protocol_valid (managerNodeIn_a_valid), // @[MixedNode.scala:551:17] .io_protocol_bits_opcode (managerNodeIn_a_bits_opcode), // @[MixedNode.scala:551:17] .io_protocol_bits_param (managerNodeIn_a_bits_param), // @[MixedNode.scala:551:17] .io_protocol_bits_size ({4'h0, managerNodeIn_a_bits_size}), // @[TLSerdes.scala:68:21] .io_protocol_bits_source ({7'h0, managerNodeIn_a_bits_source}), // @[TLSerdes.scala:68:21] .io_protocol_bits_address ({32'h0, managerNodeIn_a_bits_address}), // @[TLSerdes.scala:68:21] .io_protocol_bits_mask (managerNodeIn_a_bits_mask), // @[MixedNode.scala:551:17] .io_protocol_bits_data (managerNodeIn_a_bits_data), // @[MixedNode.scala:551:17] .io_protocol_bits_corrupt (managerNodeIn_a_bits_corrupt), // @[MixedNode.scala:551:17] .io_beat_ready (_ser_4_io_in_ready), // @[TLSerdes.scala:69:23] .io_beat_valid (_out_channels_4_2_io_beat_valid), .io_beat_bits_payload (_out_channels_4_2_io_beat_bits_payload), .io_beat_bits_head (_out_channels_4_2_io_beat_bits_head), .io_beat_bits_tail (_out_channels_4_2_io_beat_bits_tail) ); // @[TLSerdes.scala:63:50] GenericSerializer_TLBeatw10_f32 ser_0 ( // @[TLSerdes.scala:69:23] .clock (clock), .reset (reset), .io_in_ready (_ser_0_io_in_ready), .io_in_bits_head (_out_channels_0_2_io_beat_bits_head), // @[TLSerdes.scala:59:50] .io_out_ready (io_ser_0_out_ready_0), // @[TLSerdes.scala:39:9] .io_out_bits_flit (io_ser_0_out_bits_flit_0) ); // @[TLSerdes.scala:69:23] GenericSerializer_TLBeatw88_f32 ser_2 ( // @[TLSerdes.scala:69:23] .clock (clock), .reset (reset), .io_in_ready (_ser_2_io_in_ready), .io_in_bits_head (_out_channels_2_2_io_beat_bits_head), // @[TLSerdes.scala:61:50] .io_out_ready (io_ser_2_out_ready_0), // @[TLSerdes.scala:39:9] .io_out_valid (io_ser_2_out_valid_0), .io_out_bits_flit (io_ser_2_out_bits_flit_0), .io_busy (_io_debug_ser_busy_T) ); // @[TLSerdes.scala:69:23] GenericSerializer_TLBeatw88_f32_1 ser_4 ( // @[TLSerdes.scala:69:23] .clock (clock), .reset (reset), .io_in_ready (_ser_4_io_in_ready), .io_in_valid (_out_channels_4_2_io_beat_valid), // @[TLSerdes.scala:63:50] .io_in_bits_payload (_out_channels_4_2_io_beat_bits_payload), // @[TLSerdes.scala:63:50] .io_in_bits_head (_out_channels_4_2_io_beat_bits_head), // @[TLSerdes.scala:63:50] .io_in_bits_tail (_out_channels_4_2_io_beat_bits_tail), // @[TLSerdes.scala:63:50] .io_out_ready (io_ser_4_out_ready_0), // @[TLSerdes.scala:39:9] .io_out_valid (io_ser_4_out_valid_0), .io_out_bits_flit (io_ser_4_out_bits_flit_0), .io_busy (_ser_4_io_busy) ); // @[TLSerdes.scala:69:23] TLEFromBeat_SerialRAM_a64d64s8k8z8c in_channels_0_2 ( // @[TLSerdes.scala:78:28] .clock (clock), .reset (reset), .io_protocol_valid (client_tl_e_valid), .io_protocol_bits_sink (client_tl_e_bits_sink), .io_beat_ready (_in_channels_0_2_io_beat_ready), .io_beat_valid (_des_0_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_0_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_0_io_out_bits_head), // @[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_0_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:78:28] TLDFromBeat_SerialRAM_a64d64s8k8z8c in_channels_1_2 ( // @[TLSerdes.scala:79:28] .clock (clock), .reset (reset), .io_protocol_ready (managerNodeIn_d_ready), // @[MixedNode.scala:551:17] .io_protocol_valid 
(managerNodeIn_d_valid), .io_protocol_bits_opcode (managerNodeIn_d_bits_opcode), .io_protocol_bits_param (managerNodeIn_d_bits_param), .io_protocol_bits_size (_in_channels_1_2_io_protocol_bits_size), .io_protocol_bits_source (_in_channels_1_2_io_protocol_bits_source), .io_protocol_bits_sink (_in_channels_1_2_io_protocol_bits_sink), .io_protocol_bits_denied (managerNodeIn_d_bits_denied), .io_protocol_bits_data (managerNodeIn_d_bits_data), .io_protocol_bits_corrupt (managerNodeIn_d_bits_corrupt), .io_beat_ready (_in_channels_1_2_io_beat_ready), .io_beat_valid (_des_1_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_1_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_1_io_out_bits_head), // @[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_1_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:79:28] TLCFromBeat_SerialRAM_a64d64s8k8z8c in_channels_2_2 ( // @[TLSerdes.scala:80:28] .clock (clock), .reset (reset), .io_protocol_valid (client_tl_c_valid), .io_protocol_bits_opcode (client_tl_c_bits_opcode), .io_protocol_bits_param (client_tl_c_bits_param), .io_protocol_bits_size (client_tl_c_bits_size), .io_protocol_bits_source (client_tl_c_bits_source), .io_protocol_bits_address (client_tl_c_bits_address), .io_protocol_bits_data (client_tl_c_bits_data), .io_protocol_bits_corrupt (client_tl_c_bits_corrupt), .io_beat_ready (_in_channels_2_2_io_beat_ready), .io_beat_valid (_des_2_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_2_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_2_io_out_bits_head), // @[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_2_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:80:28] TLBFromBeat_SerialRAM_a64d64s8k8z8c in_channels_3_2 ( // @[TLSerdes.scala:81:28] .clock (clock), .reset (reset), .io_protocol_valid (in_channels_3_1_valid), .io_protocol_bits_opcode (in_channels_3_1_bits_opcode), .io_protocol_bits_param (in_channels_3_1_bits_param), .io_protocol_bits_size (_in_channels_3_2_io_protocol_bits_size), .io_protocol_bits_source (_in_channels_3_2_io_protocol_bits_source), .io_protocol_bits_address (_in_channels_3_2_io_protocol_bits_address), .io_protocol_bits_mask (in_channels_3_1_bits_mask), .io_protocol_bits_data (in_channels_3_1_bits_data), .io_protocol_bits_corrupt (in_channels_3_1_bits_corrupt), .io_beat_ready (_in_channels_3_2_io_beat_ready), .io_beat_valid (_des_3_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_3_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_3_io_out_bits_head), // @[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_3_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:81:28] TLAFromBeat_SerialRAM_a64d64s8k8z8c in_channels_4_2 ( // @[TLSerdes.scala:82:28] .clock (clock), .reset (reset), .io_protocol_valid (client_tl_a_valid), .io_protocol_bits_opcode (client_tl_a_bits_opcode), .io_protocol_bits_param (client_tl_a_bits_param), .io_protocol_bits_size (client_tl_a_bits_size), .io_protocol_bits_source (client_tl_a_bits_source), .io_protocol_bits_address (client_tl_a_bits_address), .io_protocol_bits_mask (client_tl_a_bits_mask), .io_protocol_bits_data (client_tl_a_bits_data), .io_protocol_bits_corrupt (client_tl_a_bits_corrupt), .io_beat_ready (_in_channels_4_2_io_beat_ready), .io_beat_valid (_des_4_io_out_valid), // @[TLSerdes.scala:86:23] .io_beat_bits_payload (_des_4_io_out_bits_payload), // @[TLSerdes.scala:86:23] .io_beat_bits_head (_des_4_io_out_bits_head), // 
@[TLSerdes.scala:86:23] .io_beat_bits_tail (_des_4_io_out_bits_tail) // @[TLSerdes.scala:86:23] ); // @[TLSerdes.scala:82:28] GenericDeserializer_TLBeatw10_f32_1 des_0 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_0_in_ready_0), .io_in_valid (io_ser_0_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_0_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_0_2_io_beat_ready), // @[TLSerdes.scala:78:28] .io_out_valid (_des_0_io_out_valid), .io_out_bits_payload (_des_0_io_out_bits_payload), .io_out_bits_head (_des_0_io_out_bits_head), .io_out_bits_tail (_des_0_io_out_bits_tail) ); // @[TLSerdes.scala:86:23] GenericDeserializer_TLBeatw67_f32_1 des_1 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_1_in_ready_0), .io_in_valid (io_ser_1_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_1_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_1_2_io_beat_ready), // @[TLSerdes.scala:79:28] .io_out_valid (_des_1_io_out_valid), .io_out_bits_payload (_des_1_io_out_bits_payload), .io_out_bits_head (_des_1_io_out_bits_head), .io_out_bits_tail (_des_1_io_out_bits_tail), .io_busy (_io_debug_des_busy_T) ); // @[TLSerdes.scala:86:23] GenericDeserializer_TLBeatw88_f32_2 des_2 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_2_in_ready_0), .io_in_valid (io_ser_2_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_2_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_2_2_io_beat_ready), // @[TLSerdes.scala:80:28] .io_out_valid (_des_2_io_out_valid), .io_out_bits_payload (_des_2_io_out_bits_payload), .io_out_bits_head (_des_2_io_out_bits_head), .io_out_bits_tail (_des_2_io_out_bits_tail), .io_busy (_des_2_io_busy) ); // @[TLSerdes.scala:86:23] GenericDeserializer_TLBeatw87_f32_1 des_3 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_3_in_ready_0), .io_in_valid (io_ser_3_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_3_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_3_2_io_beat_ready), // @[TLSerdes.scala:81:28] .io_out_valid (_des_3_io_out_valid), .io_out_bits_payload (_des_3_io_out_bits_payload), .io_out_bits_head (_des_3_io_out_bits_head), .io_out_bits_tail (_des_3_io_out_bits_tail), .io_busy (_des_3_io_busy) ); // @[TLSerdes.scala:86:23] GenericDeserializer_TLBeatw88_f32_3 des_4 ( // @[TLSerdes.scala:86:23] .clock (clock), .reset (reset), .io_in_ready (io_ser_4_in_ready_0), .io_in_valid (io_ser_4_in_valid_0), // @[TLSerdes.scala:39:9] .io_in_bits_flit (io_ser_4_in_bits_flit_0), // @[TLSerdes.scala:39:9] .io_out_ready (_in_channels_4_2_io_beat_ready), // @[TLSerdes.scala:82:28] .io_out_valid (_des_4_io_out_valid), .io_out_bits_payload (_des_4_io_out_bits_payload), .io_out_bits_head (_des_4_io_out_bits_head), .io_out_bits_tail (_des_4_io_out_bits_tail), .io_busy (_des_4_io_busy) ); // @[TLSerdes.scala:86:23] assign auto_manager_in_a_ready = auto_manager_in_a_ready_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_valid = auto_manager_in_d_valid_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_opcode = auto_manager_in_d_bits_opcode_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_param = auto_manager_in_d_bits_param_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_size = auto_manager_in_d_bits_size_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_source = auto_manager_in_d_bits_source_0; // 
@[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_sink = auto_manager_in_d_bits_sink_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_denied = auto_manager_in_d_bits_denied_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_data = auto_manager_in_d_bits_data_0; // @[TLSerdes.scala:39:9] assign auto_manager_in_d_bits_corrupt = auto_manager_in_d_bits_corrupt_0; // @[TLSerdes.scala:39:9] assign io_ser_0_in_ready = io_ser_0_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_0_out_bits_flit = io_ser_0_out_bits_flit_0; // @[TLSerdes.scala:39:9] assign io_ser_1_in_ready = io_ser_1_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_2_in_ready = io_ser_2_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_2_out_valid = io_ser_2_out_valid_0; // @[TLSerdes.scala:39:9] assign io_ser_2_out_bits_flit = io_ser_2_out_bits_flit_0; // @[TLSerdes.scala:39:9] assign io_ser_3_in_ready = io_ser_3_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_4_in_ready = io_ser_4_in_ready_0; // @[TLSerdes.scala:39:9] assign io_ser_4_out_valid = io_ser_4_out_valid_0; // @[TLSerdes.scala:39:9] assign io_ser_4_out_bits_flit = io_ser_4_out_bits_flit_0; // @[TLSerdes.scala:39:9] endmodule
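In the module above, each TileLink channel is handled by a pair of blocks: a TL*FromBeat adapter that rebuilds the protocol fields, fed by a GenericDeserializer that packs 32-bit serial flits back into a full beat (payload plus head/tail framing). A minimal sketch of that flit-to-beat step is shown below; the module name, field names and the simple in-order accumulation are illustrative assumptions, not the interface of the generated GenericDeserializer:

import chisel3._
import chisel3.util._

// Illustrative only: accumulate `flitsPerBeat` narrow flits into one wide beat.
class FlitToBeat(flitWidth: Int, beatWidth: Int) extends Module {
  private val flitsPerBeat = (beatWidth + flitWidth - 1) / flitWidth
  val io = IO(new Bundle {
    val in   = Flipped(Decoupled(UInt(flitWidth.W)))          // serial flits
    val out  = Decoupled(UInt((flitsPerBeat * flitWidth).W))  // reassembled beat
    val busy = Output(Bool())                                 // mid-beat indicator
  })
  val buf   = Reg(Vec(flitsPerBeat, UInt(flitWidth.W)))
  val count = RegInit(0.U(log2Ceil(flitsPerBeat + 1).W))
  val full  = count === flitsPerBeat.U

  io.in.ready  := !full          // stall the link while a completed beat waits
  io.out.valid := full
  io.out.bits  := buf.asUInt     // flit 0 lands in the least-significant bits
  io.busy      := count =/= 0.U

  when (io.in.fire) {
    buf(count) := io.in.bits
    count := count + 1.U
  }
  when (io.out.fire) { count := 0.U }
}

Judging by its name, a GenericDeserializer_TLBeatw67_f32 would presumably gather three 32-bit flits to rebuild a 67-bit D-channel beat before handing it to the corresponding FromBeat adapter.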
Generate the Verilog code corresponding to the following Chisel files.
File SwitchAllocator.scala:
package constellation.router

import chisel3._
import chisel3.util._

import org.chipsalliance.cde.config.{Field, Parameters}
import freechips.rocketchip.util._

import constellation.channel._

class SwitchAllocReq(val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams])
  (implicit val p: Parameters) extends Bundle with HasRouterOutputParams {
  val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })
  val tail = Bool()
}

class SwitchArbiter(inN: Int, outN: Int, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams])(implicit val p: Parameters) extends Module {
  val io = IO(new Bundle {
    val in = Flipped(Vec(inN, Decoupled(new SwitchAllocReq(outParams, egressParams))))
    val out = Vec(outN, Decoupled(new SwitchAllocReq(outParams, egressParams)))
    val chosen_oh = Vec(outN, Output(UInt(inN.W)))
  })

  val lock = Seq.fill(outN) { RegInit(0.U(inN.W)) }
  val unassigned = Cat(io.in.map(_.valid).reverse) & ~(lock.reduce(_|_))
  val mask = RegInit(0.U(inN.W))
  val choices = Wire(Vec(outN, UInt(inN.W)))
  var sel = PriorityEncoderOH(Cat(unassigned, unassigned & ~mask))
  for (i <- 0 until outN) {
    choices(i) := sel | (sel >> inN)
    sel = PriorityEncoderOH(unassigned & ~choices(i))
  }

  io.in.foreach(_.ready := false.B)
  var chosens = 0.U(inN.W)
  val in_tails = Cat(io.in.map(_.bits.tail).reverse)
  for (i <- 0 until outN) {
    val in_valids = Cat((0 until inN).map { j => io.in(j).valid && !chosens(j) }.reverse)
    val chosen = Mux((in_valids & lock(i) & ~chosens).orR, lock(i), choices(i))
    io.chosen_oh(i) := chosen
    io.out(i).valid := (in_valids & chosen).orR
    io.out(i).bits := Mux1H(chosen, io.in.map(_.bits))
    for (j <- 0 until inN) {
      when (chosen(j) && io.out(i).ready) {
        io.in(j).ready := true.B
      }
    }
    chosens = chosens | chosen
    when (io.out(i).fire) {
      lock(i) := chosen & ~in_tails
    }
  }

  when (io.out(0).fire) {
    mask := (0 until inN).map { i => (io.chosen_oh(0) >> i) }.reduce(_|_)
  } .otherwise {
    mask := Mux(~mask === 0.U, 0.U, (mask << 1) | 1.U(1.W))
  }
}

class SwitchAllocator(
  val routerParams: RouterParams,
  val inParams: Seq[ChannelParams],
  val outParams: Seq[ChannelParams],
  val ingressParams: Seq[IngressChannelParams],
  val egressParams: Seq[EgressChannelParams]
)(implicit val p: Parameters) extends Module
    with HasRouterParams
    with HasRouterInputParams
    with HasRouterOutputParams {
  val io = IO(new Bundle {
    val req = MixedVec(allInParams.map(u =>
      Vec(u.destSpeedup, Flipped(Decoupled(new SwitchAllocReq(outParams, egressParams))))))
    val credit_alloc = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Output(new OutputCreditAlloc))})
    val switch_sel = MixedVec(allOutParams.map { o => Vec(o.srcSpeedup,
      MixedVec(allInParams.map { i => Vec(i.destSpeedup, Output(Bool())) })) })
  })
  val nInputChannels = allInParams.map(_.nVirtualChannels).sum

  val arbs = allOutParams.map { oP => Module(new SwitchArbiter(
    allInParams.map(_.destSpeedup).reduce(_+_),
    oP.srcSpeedup,
    outParams,
    egressParams
  ))}
  arbs.foreach(_.io.out.foreach(_.ready := true.B))

  var idx = 0
  io.req.foreach(_.foreach { o =>
    val fires = Wire(Vec(arbs.size, Bool()))
    arbs.zipWithIndex.foreach { case (a,i) =>
      a.io.in(idx).valid := o.valid && o.bits.vc_sel(i).reduce(_||_)
      a.io.in(idx).bits := o.bits
      fires(i) := a.io.in(idx).fire
    }
    o.ready := fires.reduce(_||_)
    idx += 1
  })

  for (i <- 0 until nAllOutputs) {
    for (j <- 0 until allOutParams(i).srcSpeedup) {
      idx = 0
      for (m <- 0 until nAllInputs) {
        for (n <- 0 until allInParams(m).destSpeedup) {
          io.switch_sel(i)(j)(m)(n) := arbs(i).io.in(idx).valid && arbs(i).io.chosen_oh(j)(idx) && arbs(i).io.out(j).valid
          idx += 1
        }
      }
    }
  }

  io.credit_alloc.foreach(_.foreach(_.alloc := false.B))
  io.credit_alloc.foreach(_.foreach(_.tail := false.B))
  (arbs zip io.credit_alloc).zipWithIndex.map { case ((a,i),t) =>
    for (j <- 0 until i.size) {
      for (k <- 0 until a.io.out.size) {
        when (a.io.out(k).valid && a.io.out(k).bits.vc_sel(t)(j)) {
          i(j).alloc := true.B
          i(j).tail := a.io.out(k).bits.tail
        }
      }
    }
  }
}
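SwitchArbiter implements a locking round-robin arbiter: lock(i) holds a grant across the beats of a multi-flit packet until its tail beat, while mask rotates priority among the requesters (on a grant it is reset to cover the winning index and everything below it; otherwise it keeps filling with ones from the LSB and wraps back to zero once saturated). The core selection doubles the request vector so the priority search wraps around the mask. A small stand-alone sketch of just that trick; the names here are illustrative and not part of the file above:

import chisel3._
import chisel3.util._

// Illustrative sketch of the rotating-priority selection used in SwitchArbiter.
class RoundRobinPick(n: Int) extends Module {
  val io = IO(new Bundle {
    val req   = Input(UInt(n.W))   // request bits
    val mask  = Input(UInt(n.W))   // ones at and below the previous grant
    val grant = Output(UInt(n.W))  // one-hot grant
  })
  // {req, req & ~mask}: the low half only sees requesters that have not had a
  // turn this round; if it is empty the priority encoder falls through to the
  // high half, i.e. the search wraps around.
  val sel = PriorityEncoderOH(Cat(io.req, io.req & ~io.mask))
  io.grant := sel(n - 1, 0) | (sel >> n)
}

The generated module that follows is this arbiter elaborated with inN = 22 and outN = 1; request indices 0, 1, 4-7, 16 and 17 appear to have been tied off by the enclosing router and constant-folded away, which is why only fourteen io_in ports remain.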
module SwitchArbiter( // @[SwitchAllocator.scala:17:7] input clock, // @[SwitchAllocator.scala:17:7] input reset, // @[SwitchAllocator.scala:17:7] output io_in_2_ready, // @[SwitchAllocator.scala:18:14] input io_in_2_valid, // @[SwitchAllocator.scala:18:14] input io_in_2_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_2_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_2_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_2_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_2_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_2_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_3_ready, // @[SwitchAllocator.scala:18:14] input io_in_3_valid, // @[SwitchAllocator.scala:18:14] input io_in_3_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_3_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_3_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_3_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_3_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_3_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_8_ready, // @[SwitchAllocator.scala:18:14] input io_in_8_valid, // @[SwitchAllocator.scala:18:14] input io_in_8_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_8_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_8_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_8_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_8_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_8_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_9_ready, // @[SwitchAllocator.scala:18:14] input io_in_9_valid, // @[SwitchAllocator.scala:18:14] input io_in_9_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_9_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_9_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_9_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_9_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_9_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_10_ready, // @[SwitchAllocator.scala:18:14] input io_in_10_valid, // @[SwitchAllocator.scala:18:14] input io_in_10_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_10_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_10_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_10_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_10_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_10_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_11_ready, // @[SwitchAllocator.scala:18:14] input io_in_11_valid, // @[SwitchAllocator.scala:18:14] input io_in_11_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_11_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_11_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_11_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_11_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_11_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_12_ready, // @[SwitchAllocator.scala:18:14] input io_in_12_valid, // @[SwitchAllocator.scala:18:14] input io_in_12_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_12_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_12_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_12_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_12_bits_vc_sel_1_0, // 
@[SwitchAllocator.scala:18:14] input io_in_12_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_13_ready, // @[SwitchAllocator.scala:18:14] input io_in_13_valid, // @[SwitchAllocator.scala:18:14] input io_in_13_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_13_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_13_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_13_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_13_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_13_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_14_ready, // @[SwitchAllocator.scala:18:14] input io_in_14_valid, // @[SwitchAllocator.scala:18:14] input io_in_14_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_14_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_14_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_14_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_14_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_14_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_15_ready, // @[SwitchAllocator.scala:18:14] input io_in_15_valid, // @[SwitchAllocator.scala:18:14] input io_in_15_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_15_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_15_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_15_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_15_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_15_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_18_ready, // @[SwitchAllocator.scala:18:14] input io_in_18_valid, // @[SwitchAllocator.scala:18:14] input io_in_18_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_18_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_18_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_18_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_18_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_18_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_19_ready, // @[SwitchAllocator.scala:18:14] input io_in_19_valid, // @[SwitchAllocator.scala:18:14] input io_in_19_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_19_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_19_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_19_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_19_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_19_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_20_ready, // @[SwitchAllocator.scala:18:14] input io_in_20_valid, // @[SwitchAllocator.scala:18:14] input io_in_20_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_20_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_20_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_20_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_20_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] input io_in_20_bits_tail, // @[SwitchAllocator.scala:18:14] output io_in_21_ready, // @[SwitchAllocator.scala:18:14] input io_in_21_valid, // @[SwitchAllocator.scala:18:14] input io_in_21_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] input io_in_21_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] input io_in_21_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] input io_in_21_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] input io_in_21_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] 
input io_in_21_bits_tail, // @[SwitchAllocator.scala:18:14] input io_out_0_ready, // @[SwitchAllocator.scala:18:14] output io_out_0_valid, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_vc_sel_5_0, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_vc_sel_4_0, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_vc_sel_3_0, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_vc_sel_2_0, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_vc_sel_1_0, // @[SwitchAllocator.scala:18:14] output io_out_0_bits_tail, // @[SwitchAllocator.scala:18:14] output [21:0] io_chosen_oh_0 // @[SwitchAllocator.scala:18:14] ); reg [21:0] lock_0; // @[SwitchAllocator.scala:24:38] wire [21:0] unassigned = {io_in_21_valid, io_in_20_valid, io_in_19_valid, io_in_18_valid, 2'h0, io_in_15_valid, io_in_14_valid, io_in_13_valid, io_in_12_valid, io_in_11_valid, io_in_10_valid, io_in_9_valid, io_in_8_valid, 4'h0, io_in_3_valid, io_in_2_valid, 2'h0} & ~lock_0; // @[SwitchAllocator.scala:17:7, :24:38, :25:{23,52,54}] reg [21:0] mask; // @[SwitchAllocator.scala:27:21] wire [21:0] _sel_T_1 = unassigned & ~mask; // @[SwitchAllocator.scala:25:52, :27:21, :30:{58,60}] wire [43:0] sel = _sel_T_1[0] ? 44'h1 : _sel_T_1[1] ? 44'h2 : _sel_T_1[2] ? 44'h4 : _sel_T_1[3] ? 44'h8 : _sel_T_1[4] ? 44'h10 : _sel_T_1[5] ? 44'h20 : _sel_T_1[6] ? 44'h40 : _sel_T_1[7] ? 44'h80 : _sel_T_1[8] ? 44'h100 : _sel_T_1[9] ? 44'h200 : _sel_T_1[10] ? 44'h400 : _sel_T_1[11] ? 44'h800 : _sel_T_1[12] ? 44'h1000 : _sel_T_1[13] ? 44'h2000 : _sel_T_1[14] ? 44'h4000 : _sel_T_1[15] ? 44'h8000 : _sel_T_1[16] ? 44'h10000 : _sel_T_1[17] ? 44'h20000 : _sel_T_1[18] ? 44'h40000 : _sel_T_1[19] ? 44'h80000 : _sel_T_1[20] ? 44'h100000 : _sel_T_1[21] ? 44'h200000 : unassigned[0] ? 44'h400000 : unassigned[1] ? 44'h800000 : unassigned[2] ? 44'h1000000 : unassigned[3] ? 44'h2000000 : unassigned[4] ? 44'h4000000 : unassigned[5] ? 44'h8000000 : unassigned[6] ? 44'h10000000 : unassigned[7] ? 44'h20000000 : unassigned[8] ? 44'h40000000 : unassigned[9] ? 44'h80000000 : unassigned[10] ? 44'h100000000 : unassigned[11] ? 44'h200000000 : unassigned[12] ? 44'h400000000 : unassigned[13] ? 44'h800000000 : unassigned[14] ? 44'h1000000000 : unassigned[15] ? 44'h2000000000 : unassigned[16] ? 44'h4000000000 : unassigned[17] ? 44'h8000000000 : unassigned[18] ? 44'h10000000000 : unassigned[19] ? 44'h20000000000 : unassigned[20] ? 44'h40000000000 : {unassigned[21], 43'h0}; // @[OneHot.scala:85:71] wire [19:0] _GEN = {io_in_21_valid, io_in_20_valid, io_in_19_valid, io_in_18_valid, 2'h0, io_in_15_valid, io_in_14_valid, io_in_13_valid, io_in_12_valid, io_in_11_valid, io_in_10_valid, io_in_9_valid, io_in_8_valid, 4'h0, io_in_3_valid, io_in_2_valid}; // @[SwitchAllocator.scala:17:7, :41:24] wire [19:0] _chosen_T_2 = _GEN & lock_0[21:2]; // @[SwitchAllocator.scala:24:38, :41:24, :42:33] wire [21:0] chosen = (|{_chosen_T_2[19:16], _chosen_T_2[13:6], _chosen_T_2[1:0]}) ? 
lock_0 : sel[21:0] | sel[43:22]; // @[Mux.scala:50:70] wire [19:0] _io_out_0_valid_T = _GEN & chosen[21:2]; // @[SwitchAllocator.scala:41:24, :42:21, :44:35] wire [13:0] _GEN_0 = {_io_out_0_valid_T[19:16], _io_out_0_valid_T[13:6], _io_out_0_valid_T[1:0]}; // @[SwitchAllocator.scala:44:35] wire _GEN_1 = io_out_0_ready & (|_GEN_0); // @[Decoupled.scala:51:35] wire [20:0] _GEN_2 = chosen[20:0] | chosen[21:1]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [19:0] _GEN_3 = _GEN_2[19:0] | chosen[21:2]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [18:0] _GEN_4 = _GEN_3[18:0] | chosen[21:3]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [17:0] _GEN_5 = _GEN_4[17:0] | chosen[21:4]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [16:0] _GEN_6 = _GEN_5[16:0] | chosen[21:5]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [15:0] _GEN_7 = _GEN_6[15:0] | chosen[21:6]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [14:0] _GEN_8 = _GEN_7[14:0] | chosen[21:7]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [13:0] _GEN_9 = _GEN_8[13:0] | chosen[21:8]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [12:0] _GEN_10 = _GEN_9[12:0] | chosen[21:9]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [11:0] _GEN_11 = _GEN_10[11:0] | chosen[21:10]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [10:0] _GEN_12 = _GEN_11[10:0] | chosen[21:11]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [9:0] _GEN_13 = _GEN_12[9:0] | chosen[21:12]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [8:0] _GEN_14 = _GEN_13[8:0] | chosen[21:13]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [7:0] _GEN_15 = _GEN_14[7:0] | chosen[21:14]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [6:0] _GEN_16 = _GEN_15[6:0] | chosen[21:15]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [5:0] _GEN_17 = _GEN_16[5:0] | chosen[21:16]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [4:0] _GEN_18 = _GEN_17[4:0] | chosen[21:17]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [3:0] _GEN_19 = _GEN_18[3:0] | chosen[21:18]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [2:0] _GEN_20 = _GEN_19[2:0] | chosen[21:19]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] wire [1:0] _GEN_21 = _GEN_20[1:0] | chosen[21:20]; // @[SwitchAllocator.scala:42:21, :58:{55,71}] always @(posedge clock) begin // @[SwitchAllocator.scala:17:7] if (reset) begin // @[SwitchAllocator.scala:17:7] lock_0 <= 22'h0; // @[SwitchAllocator.scala:24:38] mask <= 22'h0; // @[SwitchAllocator.scala:27:21] end else begin // @[SwitchAllocator.scala:17:7] if (_GEN_1) // @[Decoupled.scala:51:35] lock_0 <= chosen & {~io_in_21_bits_tail, ~io_in_20_bits_tail, ~io_in_19_bits_tail, ~io_in_18_bits_tail, 2'h3, ~io_in_15_bits_tail, ~io_in_14_bits_tail, ~io_in_13_bits_tail, ~io_in_12_bits_tail, ~io_in_11_bits_tail, ~io_in_10_bits_tail, ~io_in_9_bits_tail, ~io_in_8_bits_tail, 4'hF, ~io_in_3_bits_tail, ~io_in_2_bits_tail, 2'h3}; // @[SwitchAllocator.scala:24:38, :39:21, :42:21, :53:{25,27}] mask <= _GEN_1 ? {chosen[21], _GEN_2[20], _GEN_3[19], _GEN_4[18], _GEN_5[17], _GEN_6[16], _GEN_7[15], _GEN_8[14], _GEN_9[13], _GEN_10[12], _GEN_11[11], _GEN_12[10], _GEN_13[9], _GEN_14[8], _GEN_15[7], _GEN_16[6], _GEN_17[5], _GEN_18[4], _GEN_19[3], _GEN_20[2], _GEN_21[1], _GEN_21[0] | chosen[21]} : (&mask) ? 22'h0 : {mask[20:0], 1'h1}; // @[Decoupled.scala:51:35] end always @(posedge)
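For reference, a self-contained Chisel module such as the RoundRobinPick sketch above can be pushed through firtool to SystemVerilog in the same style as the module above; the entry point and the firtool flags below are assumptions for illustration, not the build setup that produced this output:

import circt.stage.ChiselStage

object EmitRoundRobinPick extends App {
  // Writes the generated SystemVerilog for RoundRobinPick to disk.
  // firtoolOpts is optional; the flag here is just an example.
  ChiselStage.emitSystemVerilogFile(
    new RoundRobinPick(22),
    firtoolOpts = Array("-disable-all-randomization")
  )
}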
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
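// Illustrative note, not part of the original source: the fold below builds a log2(width)-stage
// barrel rotator. Stage i conditionally rotates the running value right by 2^i when bit i of the
// zero-padded amount is set, so e.g. an 8-bit value rotated by n = 5 (0b101) passes through the
// rotate-by-1 and rotate-by-4 stages, composing a rotation of 5 with only log2Ceil(8) = 3 mux levels.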
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{ AddressDecoder, AddressSet, BufferParams, DirectedBuffers, IdMap, IdMapEntry, IdRange, RegionType, TransferSizes } import freechips.rocketchip.resources.{Resource, ResourceAddress, ResourcePermissions} import freechips.rocketchip.util.{ AsyncQueueParams, BundleField, BundleFieldBase, BundleKeyBase, CreditedDelay, groupByIntoSeq, RationalDirection, SimpleProduct } import scala.math.max //These transfer sizes describe requests issued from masters on the A channel that will be responded by slaves on the D channel case class TLMasterToSlaveTransferSizes( // Supports both Acquire+Release of the following two sizes: acquireT: TransferSizes = TransferSizes.none, acquireB: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none) extends TLCommonTransferSizes { def intersect(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .intersect(rhs.acquireT), acquireB = acquireB .intersect(rhs.acquireB), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint)) def mincover(rhs: TLMasterToSlaveTransferSizes) = TLMasterToSlaveTransferSizes( acquireT = acquireT .mincover(rhs.acquireT), acquireB = acquireB .mincover(rhs.acquireB), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint)) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(acquireT, "T"), str(acquireB, "B"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""acquireT = ${acquireT} |acquireB = ${acquireB} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLMasterToSlaveTransferSizes { def unknownEmits = TLMasterToSlaveTransferSizes( acquireT = TransferSizes(1, 
4096), acquireB = TransferSizes(1, 4096), arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096)) def unknownSupports = TLMasterToSlaveTransferSizes() } //These transfer sizes describe requests issued from slaves on the B channel that will be responded by masters on the C channel case class TLSlaveToMasterTransferSizes( probe: TransferSizes = TransferSizes.none, arithmetic: TransferSizes = TransferSizes.none, logical: TransferSizes = TransferSizes.none, get: TransferSizes = TransferSizes.none, putFull: TransferSizes = TransferSizes.none, putPartial: TransferSizes = TransferSizes.none, hint: TransferSizes = TransferSizes.none ) extends TLCommonTransferSizes { def intersect(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .intersect(rhs.probe), arithmetic = arithmetic.intersect(rhs.arithmetic), logical = logical .intersect(rhs.logical), get = get .intersect(rhs.get), putFull = putFull .intersect(rhs.putFull), putPartial = putPartial.intersect(rhs.putPartial), hint = hint .intersect(rhs.hint) ) def mincover(rhs: TLSlaveToMasterTransferSizes) = TLSlaveToMasterTransferSizes( probe = probe .mincover(rhs.probe), arithmetic = arithmetic.mincover(rhs.arithmetic), logical = logical .mincover(rhs.logical), get = get .mincover(rhs.get), putFull = putFull .mincover(rhs.putFull), putPartial = putPartial.mincover(rhs.putPartial), hint = hint .mincover(rhs.hint) ) // Reduce rendering to a simple yes/no per field override def toString = { def str(x: TransferSizes, flag: String) = if (x.none) "" else flag def flags = Vector( str(probe, "P"), str(arithmetic, "A"), str(logical, "L"), str(get, "G"), str(putFull, "F"), str(putPartial, "P"), str(hint, "H")) flags.mkString } // Prints out the actual information in a user readable way def infoString = { s"""probe = ${probe} |arithmetic = ${arithmetic} |logical = ${logical} |get = ${get} |putFull = ${putFull} |putPartial = ${putPartial} |hint = ${hint} | |""".stripMargin } } object TLSlaveToMasterTransferSizes { def unknownEmits = TLSlaveToMasterTransferSizes( arithmetic = TransferSizes(1, 4096), logical = TransferSizes(1, 4096), get = TransferSizes(1, 4096), putFull = TransferSizes(1, 4096), putPartial = TransferSizes(1, 4096), hint = TransferSizes(1, 4096), probe = TransferSizes(1, 4096)) def unknownSupports = TLSlaveToMasterTransferSizes() } trait TLCommonTransferSizes { def arithmetic: TransferSizes def logical: TransferSizes def get: TransferSizes def putFull: TransferSizes def putPartial: TransferSizes def hint: TransferSizes } class TLSlaveParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], setName: Option[String], val address: Seq[AddressSet], val regionType: RegionType.T, val executable: Boolean, val fifoId: Option[Int], val supports: TLMasterToSlaveTransferSizes, val emits: TLSlaveToMasterTransferSizes, // By default, slaves are forbidden from issuing 'denied' responses (it prevents Fragmentation) val alwaysGrantsT: Boolean, // typically only true for CacheCork'd read-write devices; dual: neverReleaseData // If fifoId=Some, all accesses sent to the same fifoId are executed and ACK'd in FIFO order // Note: you can only rely on this FIFO behaviour if your TLMasterParameters include requestFifo val mayDenyGet: Boolean, // applies to: AccessAckData, GrantData val mayDenyPut: Boolean) // applies to: AccessAck, Grant, HintAck // ReleaseAck may NEVER be denied 
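// Illustrative note, not part of the original source: as an example of the fifoId field above,
// two slaves that both declare fifoId = Some(0) share one FIFO domain, so a master whose
// TLMasterParameters set requestFifo may rely on its requests to either device being executed
// and acknowledged on 'D' in issue order; slaves with fifoId = None make no such ordering promise.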
extends SimpleProduct { def sortedAddress = address.sorted override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlaveParameters] override def productPrefix = "TLSlaveParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 11 def productElement(n: Int): Any = n match { case 0 => name case 1 => address case 2 => resources case 3 => regionType case 4 => executable case 5 => fifoId case 6 => supports case 7 => emits case 8 => alwaysGrantsT case 9 => mayDenyGet case 10 => mayDenyPut case _ => throw new IndexOutOfBoundsException(n.toString) } def supportsAcquireT: TransferSizes = supports.acquireT def supportsAcquireB: TransferSizes = supports.acquireB def supportsArithmetic: TransferSizes = supports.arithmetic def supportsLogical: TransferSizes = supports.logical def supportsGet: TransferSizes = supports.get def supportsPutFull: TransferSizes = supports.putFull def supportsPutPartial: TransferSizes = supports.putPartial def supportsHint: TransferSizes = supports.hint require (!address.isEmpty, "Address cannot be empty") address.foreach { a => require (a.finite, "Address must be finite") } address.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } require (supportsPutFull.contains(supportsPutPartial), s"PutFull($supportsPutFull) < PutPartial($supportsPutPartial)") require (supportsPutFull.contains(supportsArithmetic), s"PutFull($supportsPutFull) < Arithmetic($supportsArithmetic)") require (supportsPutFull.contains(supportsLogical), s"PutFull($supportsPutFull) < Logical($supportsLogical)") require (supportsGet.contains(supportsArithmetic), s"Get($supportsGet) < Arithmetic($supportsArithmetic)") require (supportsGet.contains(supportsLogical), s"Get($supportsGet) < Logical($supportsLogical)") require (supportsAcquireB.contains(supportsAcquireT), s"AcquireB($supportsAcquireB) < AcquireT($supportsAcquireT)") require (!alwaysGrantsT || supportsAcquireT, s"Must supportAcquireT if promising to always grantT") // Make sure that the regionType agrees with the capabilities require (!supportsAcquireB || regionType >= RegionType.UNCACHED) // acquire -> uncached, tracked, cached require (regionType <= RegionType.UNCACHED || supportsAcquireB) // tracked, cached -> acquire require (regionType != RegionType.UNCACHED || supportsGet) // uncached -> supportsGet val name = setName.orElse(nodePath.lastOption.map(_.lazyModule.name)).getOrElse("disconnected") val maxTransfer = List( // Largest supported transfer of all types supportsAcquireT.max, supportsAcquireB.max, supportsArithmetic.max, supportsLogical.max, supportsGet.max, supportsPutFull.max, supportsPutPartial.max).max val maxAddress = address.map(_.max).max val minAlignment = address.map(_.alignment).min // The device had better not support a transfer larger than its alignment require (minAlignment >= maxTransfer, s"Bad $address: minAlignment ($minAlignment) must be >= maxTransfer ($maxTransfer)") def toResource: ResourceAddress = { ResourceAddress(address, ResourcePermissions( r = supportsAcquireB || supportsGet, w = supportsAcquireT || supportsPutFull, x = executable, c = supportsAcquireB, a = supportsArithmetic && supportsLogical)) } def findTreeViolation() = nodePath.find { case _: MixedAdapterNode[_, _, _, _, _, _, _, _] => false case _: SinkNode[_, _, _, _, _] => false case node => node.inputs.size != 1 } def isTree = findTreeViolation() == None def infoString = { s"""Slave Name = ${name} |Slave Address = ${address} |supports = ${supports.infoString} | 
|""".stripMargin } def v1copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { new TLSlaveParameters( setName = setName, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = emits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: Option[String] = setName, address: Seq[AddressSet] = address, regionType: RegionType.T = regionType, executable: Boolean = executable, fifoId: Option[Int] = fifoId, supports: TLMasterToSlaveTransferSizes = supports, emits: TLSlaveToMasterTransferSizes = emits, alwaysGrantsT: Boolean = alwaysGrantsT, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } @deprecated("Use v1copy instead of copy","") def copy( address: Seq[AddressSet] = address, resources: Seq[Resource] = resources, regionType: RegionType.T = regionType, executable: Boolean = executable, nodePath: Seq[BaseNode] = nodePath, supportsAcquireT: TransferSizes = supports.acquireT, supportsAcquireB: TransferSizes = supports.acquireB, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint, mayDenyGet: Boolean = mayDenyGet, mayDenyPut: Boolean = mayDenyPut, alwaysGrantsT: Boolean = alwaysGrantsT, fifoId: Option[Int] = fifoId) = { v1copy( address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supportsAcquireT = supportsAcquireT, supportsAcquireB = supportsAcquireB, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } } object TLSlaveParameters { def v1( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), 
supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = { new TLSlaveParameters( setName = None, address = address, resources = resources, regionType = regionType, executable = executable, nodePath = nodePath, supports = TLMasterToSlaveTransferSizes( acquireT = supportsAcquireT, acquireB = supportsAcquireB, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLSlaveToMasterTransferSizes.unknownEmits, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut, alwaysGrantsT = alwaysGrantsT, fifoId = fifoId) } def v2( address: Seq[AddressSet], nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Seq(), name: Option[String] = None, regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, fifoId: Option[Int] = None, supports: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownSupports, emits: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownEmits, alwaysGrantsT: Boolean = false, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false) = { new TLSlaveParameters( nodePath = nodePath, resources = resources, setName = name, address = address, regionType = regionType, executable = executable, fifoId = fifoId, supports = supports, emits = emits, alwaysGrantsT = alwaysGrantsT, mayDenyGet = mayDenyGet, mayDenyPut = mayDenyPut) } } object TLManagerParameters { @deprecated("Use TLSlaveParameters.v1 instead of TLManagerParameters","") def apply( address: Seq[AddressSet], resources: Seq[Resource] = Seq(), regionType: RegionType.T = RegionType.GET_EFFECTS, executable: Boolean = false, nodePath: Seq[BaseNode] = Seq(), supportsAcquireT: TransferSizes = TransferSizes.none, supportsAcquireB: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none, mayDenyGet: Boolean = false, mayDenyPut: Boolean = false, alwaysGrantsT: Boolean = false, fifoId: Option[Int] = None) = TLSlaveParameters.v1( address, resources, regionType, executable, nodePath, supportsAcquireT, supportsAcquireB, supportsArithmetic, supportsLogical, supportsGet, supportsPutFull, supportsPutPartial, supportsHint, mayDenyGet, mayDenyPut, alwaysGrantsT, fifoId, ) } case class TLChannelBeatBytes(a: Option[Int], b: Option[Int], c: Option[Int], d: Option[Int]) { def members = Seq(a, b, c, d) members.collect { case Some(beatBytes) => require (isPow2(beatBytes), "Data channel width must be a power of 2") } } object TLChannelBeatBytes{ def apply(beatBytes: Int): TLChannelBeatBytes = TLChannelBeatBytes( Some(beatBytes), Some(beatBytes), Some(beatBytes), Some(beatBytes)) def apply(): TLChannelBeatBytes = TLChannelBeatBytes( None, None, None, None) } class TLSlavePortParameters private( val slaves: Seq[TLSlaveParameters], 
val channelBytes: TLChannelBeatBytes, val endSinkId: Int, val minLatency: Int, val responseFields: Seq[BundleFieldBase], val requestKeys: Seq[BundleKeyBase]) extends SimpleProduct { def sortedSlaves = slaves.sortBy(_.sortedAddress.head) override def canEqual(that: Any): Boolean = that.isInstanceOf[TLSlavePortParameters] override def productPrefix = "TLSlavePortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => slaves case 1 => channelBytes case 2 => endSinkId case 3 => minLatency case 4 => responseFields case 5 => requestKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!slaves.isEmpty, "Slave ports must have slaves") require (endSinkId >= 0, "Sink ids cannot be negative") require (minLatency >= 0, "Minimum required latency cannot be negative") // Using this API implies you cannot handle mixed-width busses def beatBytes = { channelBytes.members.foreach { width => require (width.isDefined && width == channelBytes.a) } channelBytes.a.get } // TODO this should be deprecated def managers = slaves def requireFifo(policy: TLFIFOFixer.Policy = TLFIFOFixer.allFIFO) = { val relevant = slaves.filter(m => policy(m)) relevant.foreach { m => require(m.fifoId == relevant.head.fifoId, s"${m.name} had fifoId ${m.fifoId}, which was not homogeneous (${slaves.map(s => (s.name, s.fifoId))}) ") } } // Bounds on required sizes def maxAddress = slaves.map(_.maxAddress).max def maxTransfer = slaves.map(_.maxTransfer).max def mayDenyGet = slaves.exists(_.mayDenyGet) def mayDenyPut = slaves.exists(_.mayDenyPut) // Diplomatically determined operation sizes emitted by all outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = slaves.map(_.emits).reduce( _ intersect _) // Operation Emitted by at least one outward Slaves // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = slaves.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val allSupportClaims = slaves.map(_.supports).reduce( _ intersect _) val allSupportAcquireT = allSupportClaims.acquireT val allSupportAcquireB = allSupportClaims.acquireB val allSupportArithmetic = allSupportClaims.arithmetic val allSupportLogical = allSupportClaims.logical val allSupportGet = allSupportClaims.get val allSupportPutFull = allSupportClaims.putFull val allSupportPutPartial = allSupportClaims.putPartial val allSupportHint = allSupportClaims.hint // Operation supported by at least one outward Slaves // as opposed to supports* which generate circuitry to check which specific addresses val anySupportClaims = slaves.map(_.supports).reduce(_ mincover _) val anySupportAcquireT = !anySupportClaims.acquireT.none val anySupportAcquireB = !anySupportClaims.acquireB.none val anySupportArithmetic = !anySupportClaims.arithmetic.none val anySupportLogical = !anySupportClaims.logical.none val anySupportGet = !anySupportClaims.get.none val anySupportPutFull = !anySupportClaims.putFull.none val anySupportPutPartial = !anySupportClaims.putPartial.none val anySupportHint = !anySupportClaims.hint.none // Supporting Acquire means being routable for GrantAck require ((endSinkId == 0) == !anySupportAcquireB) // These return Option[TLSlaveParameters] for your convenience def find(address: BigInt) = slaves.find(_.address.exists(_.contains(address))) // The safe version will check the 
entire address def findSafe(address: UInt) = VecInit(sortedSlaves.map(_.address.map(_.contains(address)).reduce(_ || _))) // The fast version assumes the address is valid (you probably want fastProperty instead of this function) def findFast(address: UInt) = { val routingMask = AddressDecoder(slaves.map(_.address)) VecInit(sortedSlaves.map(_.address.map(_.widen(~routingMask)).distinct.map(_.contains(address)).reduce(_ || _))) } // Compute the simplest AddressSets that decide a key def fastPropertyGroup[K](p: TLSlaveParameters => K): Seq[(K, Seq[AddressSet])] = { val groups = groupByIntoSeq(sortedSlaves.map(m => (p(m), m.address)))( _._1).map { case (k, vs) => k -> vs.flatMap(_._2) } val reductionMask = AddressDecoder(groups.map(_._2)) groups.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~reductionMask)).distinct) } } // Select a property def fastProperty[K, D <: Data](address: UInt, p: TLSlaveParameters => K, d: K => D): D = Mux1H(fastPropertyGroup(p).map { case (v, a) => (a.map(_.contains(address)).reduce(_||_), d(v)) }) // Note: returns the actual fifoId + 1 or 0 if None def findFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.map(_+1).getOrElse(0), (i:Int) => i.U) def hasFifoIdFast(address: UInt) = fastProperty(address, _.fifoId.isDefined, (b:Boolean) => b.B) // Does this Port manage this ID/address? def containsSafe(address: UInt) = findSafe(address).reduce(_ || _) private def addressHelper( // setting safe to false indicates that all addresses are expected to be legal, which might reduce circuit complexity safe: Boolean, // member filters out the sizes being checked based on the opcode being emitted or supported member: TLSlaveParameters => TransferSizes, address: UInt, lgSize: UInt, // range provides a limit on the sizes that are expected to be evaluated, which might reduce circuit complexity range: Option[TransferSizes]): Bool = { // trim reduces circuit complexity by intersecting checked sizes with the range argument def trim(x: TransferSizes) = range.map(_.intersect(x)).getOrElse(x) // groupBy returns an unordered map, convert back to Seq and sort the result for determinism // groupByIntoSeq is turning slaves into trimmed membership sizes // We are grouping all the slaves by their transfer size where // if they support the trimmed size then // member is the type of transfer that you are looking for (What you are trying to filter on) // When you consider membership, you are trimming the sizes to only the ones that you care about // you are filtering the slaves based on both whether they support a particular opcode and the size // Grouping the slaves based on the actual transfer size range they support // intersecting the range and checking their membership // FOR SUPPORTCASES instead of returning the list of slaves, // you are returning a map from transfer size to the set of // address sets that are supported for that transfer size // find all the slaves that support a certain type of operation and then group their addresses by the supported size // for every size there could be multiple address ranges // safety is a trade off between checking between all possible addresses vs only the addresses // that are known to have supported sizes // the trade off is 'checking all addresses is a more expensive circuit but will always give you // the right answer even if you give it an illegal address' // the not safe version is a cheaper circuit but if you give it an illegal address then it might produce the wrong answer // fast presumes address legality // This 
groupByIntoSeq deterministically groups all address sets for which a given `member` transfer size applies. // In the resulting Map of cases, the keys are transfer sizes and the values are all address sets which emit or support that size. val supportCases = groupByIntoSeq(slaves)(m => trim(member(m))).map { case (k: TransferSizes, vs: Seq[TLSlaveParameters]) => k -> vs.flatMap(_.address) } // safe produces a circuit that compares against all possible addresses, // whereas fast presumes that the address is legal but uses an efficient address decoder val mask = if (safe) ~BigInt(0) else AddressDecoder(supportCases.map(_._2)) // Simplified creates the most concise possible representation of each cases' address sets based on the mask. val simplified = supportCases.map { case (k, seq) => k -> AddressSet.unify(seq.map(_.widen(~mask)).distinct) } simplified.map { case (s, a) => // s is a size, you are checking for this size either the size of the operation is in s // We return an or-reduction of all the cases, checking whether any contains both the dynamic size and dynamic address on the wire. ((Some(s) == range).B || s.containsLg(lgSize)) && a.map(_.contains(address)).reduce(_||_) }.foldLeft(false.B)(_||_) } def supportsAcquireTSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireT, address, lgSize, range) def supportsAcquireBSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.acquireB, address, lgSize, range) def supportsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.arithmetic, address, lgSize, range) def supportsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.logical, address, lgSize, range) def supportsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.get, address, lgSize, range) def supportsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putFull, address, lgSize, range) def supportsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.putPartial, address, lgSize, range) def supportsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.supports.hint, address, lgSize, range) def supportsAcquireTFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireT, address, lgSize, range) def supportsAcquireBFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.acquireB, address, lgSize, range) def supportsArithmeticFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.arithmetic, address, lgSize, range) def supportsLogicalFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.logical, address, lgSize, range) def supportsGetFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.get, address, lgSize, range) def supportsPutFullFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.putFull, address, lgSize, range) def supportsPutPartialFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, 
_.supports.putPartial, address, lgSize, range) def supportsHintFast (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(false, _.supports.hint, address, lgSize, range) def emitsProbeSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.probe, address, lgSize, range) def emitsArithmeticSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.arithmetic, address, lgSize, range) def emitsLogicalSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.logical, address, lgSize, range) def emitsGetSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.get, address, lgSize, range) def emitsPutFullSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putFull, address, lgSize, range) def emitsPutPartialSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.putPartial, address, lgSize, range) def emitsHintSafe (address: UInt, lgSize: UInt, range: Option[TransferSizes] = None) = addressHelper(true, _.emits.hint, address, lgSize, range) def findTreeViolation() = slaves.flatMap(_.findTreeViolation()).headOption def isTree = !slaves.exists(!_.isTree) def infoString = "Slave Port Beatbytes = " + beatBytes + "\n" + "Slave Port MinLatency = " + minLatency + "\n\n" + slaves.map(_.infoString).mkString def v1copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = managers, channelBytes = if (beatBytes != -1) TLChannelBeatBytes(beatBytes) else channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } def v2copy( slaves: Seq[TLSlaveParameters] = slaves, channelBytes: TLChannelBeatBytes = channelBytes, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { new TLSlavePortParameters( slaves = slaves, channelBytes = channelBytes, endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } @deprecated("Use v1copy instead of copy","") def copy( managers: Seq[TLSlaveParameters] = slaves, beatBytes: Int = -1, endSinkId: Int = endSinkId, minLatency: Int = minLatency, responseFields: Seq[BundleFieldBase] = responseFields, requestKeys: Seq[BundleKeyBase] = requestKeys) = { v1copy( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } object TLSlavePortParameters { def v1( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { new TLSlavePortParameters( slaves = managers, channelBytes = TLChannelBeatBytes(beatBytes), endSinkId = endSinkId, minLatency = minLatency, responseFields = responseFields, requestKeys = requestKeys) } } object TLManagerPortParameters { @deprecated("Use TLSlavePortParameters.v1 instead of TLManagerPortParameters","") def apply( managers: Seq[TLSlaveParameters], beatBytes: Int, endSinkId: Int = 0, minLatency: Int = 0, responseFields: Seq[BundleFieldBase] = Nil, requestKeys: Seq[BundleKeyBase] = Nil) = { 
TLSlavePortParameters.v1( managers, beatBytes, endSinkId, minLatency, responseFields, requestKeys) } } class TLMasterParameters private( val nodePath: Seq[BaseNode], val resources: Seq[Resource], val name: String, val visibility: Seq[AddressSet], val unusedRegionTypes: Set[RegionType.T], val executesOnly: Boolean, val requestFifo: Boolean, // only a request, not a requirement. applies to A, not C. val supports: TLSlaveToMasterTransferSizes, val emits: TLMasterToSlaveTransferSizes, val neverReleasesData: Boolean, val sourceId: IdRange) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterParameters] override def productPrefix = "TLMasterParameters" // We intentionally omit nodePath for equality testing / formatting def productArity: Int = 10 def productElement(n: Int): Any = n match { case 0 => name case 1 => sourceId case 2 => resources case 3 => visibility case 4 => unusedRegionTypes case 5 => executesOnly case 6 => requestFifo case 7 => supports case 8 => emits case 9 => neverReleasesData case _ => throw new IndexOutOfBoundsException(n.toString) } require (!sourceId.isEmpty) require (!visibility.isEmpty) require (supports.putFull.contains(supports.putPartial)) // We only support these operations if we support Probe (ie: we're a cache) require (supports.probe.contains(supports.arithmetic)) require (supports.probe.contains(supports.logical)) require (supports.probe.contains(supports.get)) require (supports.probe.contains(supports.putFull)) require (supports.probe.contains(supports.putPartial)) require (supports.probe.contains(supports.hint)) visibility.combinations(2).foreach { case Seq(x,y) => require (!x.overlaps(y), s"$x and $y overlap.") } val maxTransfer = List( supports.probe.max, supports.arithmetic.max, supports.logical.max, supports.get.max, supports.putFull.max, supports.putPartial.max).max def infoString = { s"""Master Name = ${name} |visibility = ${visibility} |emits = ${emits.infoString} |sourceId = ${sourceId} | |""".stripMargin } def v1copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { new TLMasterParameters( nodePath = nodePath, resources = this.resources, name = name, visibility = visibility, unusedRegionTypes = this.unusedRegionTypes, executesOnly = this.executesOnly, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = this.emits, neverReleasesData = this.neverReleasesData, sourceId = sourceId) } def v2copy( nodePath: Seq[BaseNode] = nodePath, resources: Seq[Resource] = resources, name: String = name, visibility: Seq[AddressSet] = visibility, unusedRegionTypes: Set[RegionType.T] = unusedRegionTypes, executesOnly: Boolean = executesOnly, requestFifo: Boolean = requestFifo, supports: TLSlaveToMasterTransferSizes = supports, emits: TLMasterToSlaveTransferSizes = emits, neverReleasesData: Boolean = neverReleasesData, sourceId: IdRange = sourceId) = { new 
TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } @deprecated("Use v1copy instead of copy","") def copy( name: String = name, sourceId: IdRange = sourceId, nodePath: Seq[BaseNode] = nodePath, requestFifo: Boolean = requestFifo, visibility: Seq[AddressSet] = visibility, supportsProbe: TransferSizes = supports.probe, supportsArithmetic: TransferSizes = supports.arithmetic, supportsLogical: TransferSizes = supports.logical, supportsGet: TransferSizes = supports.get, supportsPutFull: TransferSizes = supports.putFull, supportsPutPartial: TransferSizes = supports.putPartial, supportsHint: TransferSizes = supports.hint) = { v1copy( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } object TLMasterParameters { def v1( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { new TLMasterParameters( nodePath = nodePath, resources = Nil, name = name, visibility = visibility, unusedRegionTypes = Set(), executesOnly = false, requestFifo = requestFifo, supports = TLSlaveToMasterTransferSizes( probe = supportsProbe, arithmetic = supportsArithmetic, logical = supportsLogical, get = supportsGet, putFull = supportsPutFull, putPartial = supportsPutPartial, hint = supportsHint), emits = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData = false, sourceId = sourceId) } def v2( nodePath: Seq[BaseNode] = Seq(), resources: Seq[Resource] = Nil, name: String, visibility: Seq[AddressSet] = Seq(AddressSet(0, ~0)), unusedRegionTypes: Set[RegionType.T] = Set(), executesOnly: Boolean = false, requestFifo: Boolean = false, supports: TLSlaveToMasterTransferSizes = TLSlaveToMasterTransferSizes.unknownSupports, emits: TLMasterToSlaveTransferSizes = TLMasterToSlaveTransferSizes.unknownEmits, neverReleasesData: Boolean = false, sourceId: IdRange = IdRange(0,1)) = { new TLMasterParameters( nodePath = nodePath, resources = resources, name = name, visibility = visibility, unusedRegionTypes = unusedRegionTypes, executesOnly = executesOnly, requestFifo = requestFifo, supports = supports, emits = emits, neverReleasesData = neverReleasesData, sourceId = sourceId) } } object TLClientParameters { @deprecated("Use TLMasterParameters.v1 instead of TLClientParameters","") def apply( name: String, sourceId: IdRange = IdRange(0,1), nodePath: Seq[BaseNode] = Seq(), requestFifo: Boolean = false, visibility: Seq[AddressSet] = Seq(AddressSet.everything), supportsProbe: TransferSizes = TransferSizes.none, supportsArithmetic: TransferSizes = TransferSizes.none, supportsLogical: TransferSizes = TransferSizes.none, supportsGet: 
TransferSizes = TransferSizes.none, supportsPutFull: TransferSizes = TransferSizes.none, supportsPutPartial: TransferSizes = TransferSizes.none, supportsHint: TransferSizes = TransferSizes.none) = { TLMasterParameters.v1( name = name, sourceId = sourceId, nodePath = nodePath, requestFifo = requestFifo, visibility = visibility, supportsProbe = supportsProbe, supportsArithmetic = supportsArithmetic, supportsLogical = supportsLogical, supportsGet = supportsGet, supportsPutFull = supportsPutFull, supportsPutPartial = supportsPutPartial, supportsHint = supportsHint) } } class TLMasterPortParameters private( val masters: Seq[TLMasterParameters], val channelBytes: TLChannelBeatBytes, val minLatency: Int, val echoFields: Seq[BundleFieldBase], val requestFields: Seq[BundleFieldBase], val responseKeys: Seq[BundleKeyBase]) extends SimpleProduct { override def canEqual(that: Any): Boolean = that.isInstanceOf[TLMasterPortParameters] override def productPrefix = "TLMasterPortParameters" def productArity: Int = 6 def productElement(n: Int): Any = n match { case 0 => masters case 1 => channelBytes case 2 => minLatency case 3 => echoFields case 4 => requestFields case 5 => responseKeys case _ => throw new IndexOutOfBoundsException(n.toString) } require (!masters.isEmpty) require (minLatency >= 0) def clients = masters // Require disjoint ranges for Ids IdRange.overlaps(masters.map(_.sourceId)).foreach { case (x, y) => require (!x.overlaps(y), s"TLClientParameters.sourceId ${x} overlaps ${y}") } // Bounds on required sizes def endSourceId = masters.map(_.sourceId.end).max def maxTransfer = masters.map(_.maxTransfer).max // The unused sources < endSourceId def unusedSources: Seq[Int] = { val usedSources = masters.map(_.sourceId).sortBy(_.start) ((Seq(0) ++ usedSources.map(_.end)) zip usedSources.map(_.start)) flatMap { case (end, start) => end until start } } // Diplomatically determined operation sizes emitted by all inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val allEmitClaims = masters.map(_.emits).reduce( _ intersect _) // Diplomatically determined operation sizes Emitted by at least one inward Masters // as opposed to emits* which generate circuitry to check which specific addresses val anyEmitClaims = masters.map(_.emits).reduce(_ mincover _) // Diplomatically determined operation sizes supported by all inward Masters // as opposed to supports* which generate circuitry to check which specific addresses val allSupportProbe = masters.map(_.supports.probe) .reduce(_ intersect _) val allSupportArithmetic = masters.map(_.supports.arithmetic).reduce(_ intersect _) val allSupportLogical = masters.map(_.supports.logical) .reduce(_ intersect _) val allSupportGet = masters.map(_.supports.get) .reduce(_ intersect _) val allSupportPutFull = masters.map(_.supports.putFull) .reduce(_ intersect _) val allSupportPutPartial = masters.map(_.supports.putPartial).reduce(_ intersect _) val allSupportHint = masters.map(_.supports.hint) .reduce(_ intersect _) // Diplomatically determined operation sizes supported by at least one master // as opposed to supports* which generate circuitry to check which specific addresses val anySupportProbe = masters.map(!_.supports.probe.none) .reduce(_ || _) val anySupportArithmetic = masters.map(!_.supports.arithmetic.none).reduce(_ || _) val anySupportLogical = masters.map(!_.supports.logical.none) .reduce(_ || _) val anySupportGet = masters.map(!_.supports.get.none) .reduce(_ || _) val anySupportPutFull = 
masters.map(!_.supports.putFull.none) .reduce(_ || _) val anySupportPutPartial = masters.map(!_.supports.putPartial.none).reduce(_ || _) val anySupportHint = masters.map(!_.supports.hint.none) .reduce(_ || _) // These return Option[TLMasterParameters] for your convenience def find(id: Int) = masters.find(_.sourceId.contains(id)) // Synthesizable lookup methods def find(id: UInt) = VecInit(masters.map(_.sourceId.contains(id))) def contains(id: UInt) = find(id).reduce(_ || _) def requestFifo(id: UInt) = Mux1H(find(id), masters.map(c => c.requestFifo.B)) // Available during RTL runtime, checks to see if (id, size) is supported by the master's (client's) diplomatic parameters private def sourceIdHelper(member: TLMasterParameters => TransferSizes)(id: UInt, lgSize: UInt) = { val allSame = masters.map(member(_) == member(masters(0))).reduce(_ && _) // this if statement is a coarse generalization of the groupBy in the sourceIdHelper2 version; // the case where there is only one group. if (allSame) member(masters(0)).containsLg(lgSize) else { // Find the master associated with ID and returns whether that particular master is able to receive transaction of lgSize Mux1H(find(id), masters.map(member(_).containsLg(lgSize))) } } // Check for support of a given operation at a specific id val supportsProbe = sourceIdHelper(_.supports.probe) _ val supportsArithmetic = sourceIdHelper(_.supports.arithmetic) _ val supportsLogical = sourceIdHelper(_.supports.logical) _ val supportsGet = sourceIdHelper(_.supports.get) _ val supportsPutFull = sourceIdHelper(_.supports.putFull) _ val supportsPutPartial = sourceIdHelper(_.supports.putPartial) _ val supportsHint = sourceIdHelper(_.supports.hint) _ // TODO: Merge sourceIdHelper2 with sourceIdHelper private def sourceIdHelper2( member: TLMasterParameters => TransferSizes, sourceId: UInt, lgSize: UInt): Bool = { // Because sourceIds are uniquely owned by each master, we use them to group the // cases that have to be checked. 
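// A minimal illustrative sketch (hypothetical masters, not drawn from this file): if master
// "core" with sourceId IdRange(0, 4) and master "dma" with sourceId IdRange(4, 8) both emit
// Get over TransferSizes(1, 64), groupByIntoSeq collapses them into the single case
// TransferSizes(1, 64) -> Seq(IdRange(0, 4), IdRange(4, 8)), so the generated circuit performs
// one containsLg(lgSize) size check OR-reduced over the two sourceId range compares, instead
// of one comparator chain per master.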
val emitCases = groupByIntoSeq(masters)(m => member(m)).map { case (k, vs) => k -> vs.map(_.sourceId) } emitCases.map { case (s, a) => (s.containsLg(lgSize)) && a.map(_.contains(sourceId)).reduce(_||_) }.foldLeft(false.B)(_||_) } // Check for emit of a given operation at a specific id def emitsAcquireT (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireT, sourceId, lgSize) def emitsAcquireB (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.acquireB, sourceId, lgSize) def emitsArithmetic(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.arithmetic, sourceId, lgSize) def emitsLogical (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.logical, sourceId, lgSize) def emitsGet (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.get, sourceId, lgSize) def emitsPutFull (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putFull, sourceId, lgSize) def emitsPutPartial(sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.putPartial, sourceId, lgSize) def emitsHint (sourceId: UInt, lgSize: UInt) = sourceIdHelper2(_.emits.hint, sourceId, lgSize) def infoString = masters.map(_.infoString).mkString def v1copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = clients, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2copy( masters: Seq[TLMasterParameters] = masters, channelBytes: TLChannelBeatBytes = channelBytes, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } @deprecated("Use v1copy instead of copy","") def copy( clients: Seq[TLMasterParameters] = masters, minLatency: Int = minLatency, echoFields: Seq[BundleFieldBase] = echoFields, requestFields: Seq[BundleFieldBase] = requestFields, responseKeys: Seq[BundleKeyBase] = responseKeys) = { v1copy( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLClientPortParameters { @deprecated("Use TLMasterPortParameters.v1 instead of TLClientPortParameters","") def apply( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { TLMasterPortParameters.v1( clients, minLatency, echoFields, requestFields, responseKeys) } } object TLMasterPortParameters { def v1( clients: Seq[TLMasterParameters], minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = clients, channelBytes = TLChannelBeatBytes(), minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } def v2( masters: Seq[TLMasterParameters], channelBytes: TLChannelBeatBytes = TLChannelBeatBytes(), minLatency: Int = 0, echoFields: Seq[BundleFieldBase] = Nil, requestFields: Seq[BundleFieldBase] = Nil, responseKeys: Seq[BundleKeyBase] = Nil) = { new TLMasterPortParameters( masters = masters, channelBytes = channelBytes, 
minLatency = minLatency, echoFields = echoFields, requestFields = requestFields, responseKeys = responseKeys) } } case class TLBundleParameters( addressBits: Int, dataBits: Int, sourceBits: Int, sinkBits: Int, sizeBits: Int, echoFields: Seq[BundleFieldBase], requestFields: Seq[BundleFieldBase], responseFields: Seq[BundleFieldBase], hasBCE: Boolean) { // Chisel has issues with 0-width wires require (addressBits >= 1) require (dataBits >= 8) require (sourceBits >= 1) require (sinkBits >= 1) require (sizeBits >= 1) require (isPow2(dataBits)) echoFields.foreach { f => require (f.key.isControl, s"${f} is not a legal echo field") } val addrLoBits = log2Up(dataBits/8) // Used to uniquify bus IP names def shortName = s"a${addressBits}d${dataBits}s${sourceBits}k${sinkBits}z${sizeBits}" + (if (hasBCE) "c" else "u") def union(x: TLBundleParameters) = TLBundleParameters( max(addressBits, x.addressBits), max(dataBits, x.dataBits), max(sourceBits, x.sourceBits), max(sinkBits, x.sinkBits), max(sizeBits, x.sizeBits), echoFields = BundleField.union(echoFields ++ x.echoFields), requestFields = BundleField.union(requestFields ++ x.requestFields), responseFields = BundleField.union(responseFields ++ x.responseFields), hasBCE || x.hasBCE) } object TLBundleParameters { val emptyBundleParams = TLBundleParameters( addressBits = 1, dataBits = 8, sourceBits = 1, sinkBits = 1, sizeBits = 1, echoFields = Nil, requestFields = Nil, responseFields = Nil, hasBCE = false) def union(x: Seq[TLBundleParameters]) = x.foldLeft(emptyBundleParams)((x,y) => x.union(y)) def apply(master: TLMasterPortParameters, slave: TLSlavePortParameters) = new TLBundleParameters( addressBits = log2Up(slave.maxAddress + 1), dataBits = slave.beatBytes * 8, sourceBits = log2Up(master.endSourceId), sinkBits = log2Up(slave.endSinkId), sizeBits = log2Up(log2Ceil(max(master.maxTransfer, slave.maxTransfer))+1), echoFields = master.echoFields, requestFields = BundleField.accept(master.requestFields, slave.requestKeys), responseFields = BundleField.accept(slave.responseFields, master.responseKeys), hasBCE = master.anySupportProbe && slave.anySupportAcquireB) } case class TLEdgeParameters( master: TLMasterPortParameters, slave: TLSlavePortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { // legacy names: def manager = slave def client = master val maxTransfer = max(master.maxTransfer, slave.maxTransfer) val maxLgSize = log2Ceil(maxTransfer) // Sanity check the link... 
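// Worked example with assumed, typical values: for slave.beatBytes = 8 and
// maxTransfer = max(master.maxTransfer, slave.maxTransfer) = 64 bytes, the require below
// passes and maxLgSize = log2Ceil(64) = 6, so size fields span lgSizes 0 through 6; a link
// whose largest transfer (say 4 bytes) were smaller than one 8-byte beat is rejected here.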
require (maxTransfer >= slave.beatBytes, s"Link's max transfer (${maxTransfer}) < ${slave.slaves.map(_.name)}'s beatBytes (${slave.beatBytes})") def diplomaticClaimsMasterToSlave = master.anyEmitClaims.intersect(slave.anySupportClaims) val bundle = TLBundleParameters(master, slave) def formatEdge = master.infoString + "\n" + slave.infoString } case class TLCreditedDelay( a: CreditedDelay, b: CreditedDelay, c: CreditedDelay, d: CreditedDelay, e: CreditedDelay) { def + (that: TLCreditedDelay): TLCreditedDelay = TLCreditedDelay( a = a + that.a, b = b + that.b, c = c + that.c, d = d + that.d, e = e + that.e) override def toString = s"(${a}, ${b}, ${c}, ${d}, ${e})" } object TLCreditedDelay { def apply(delay: CreditedDelay): TLCreditedDelay = apply(delay, delay.flip, delay, delay.flip, delay) } case class TLCreditedManagerPortParameters(delay: TLCreditedDelay, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLCreditedClientPortParameters(delay: TLCreditedDelay, base: TLMasterPortParameters) {def infoString = base.infoString} case class TLCreditedEdgeParameters(client: TLCreditedClientPortParameters, manager: TLCreditedManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val delay = client.delay + manager.delay val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLAsyncManagerPortParameters(async: AsyncQueueParams, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLAsyncClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLAsyncBundleParameters(async: AsyncQueueParams, base: TLBundleParameters) case class TLAsyncEdgeParameters(client: TLAsyncClientPortParameters, manager: TLAsyncManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLAsyncBundleParameters(manager.async, TLBundleParameters(client.base, manager.base)) def formatEdge = client.infoString + "\n" + manager.infoString } case class TLRationalManagerPortParameters(direction: RationalDirection, base: TLSlavePortParameters) {def infoString = base.infoString} case class TLRationalClientPortParameters(base: TLMasterPortParameters) {def infoString = base.infoString} case class TLRationalEdgeParameters(client: TLRationalClientPortParameters, manager: TLRationalManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends FormatEdge { val bundle = TLBundleParameters(client.base, manager.base) def formatEdge = client.infoString + "\n" + manager.infoString } // To be unified, devices must agree on all of these terms case class ManagerUnificationKey( resources: Seq[Resource], regionType: RegionType.T, executable: Boolean, supportsAcquireT: TransferSizes, supportsAcquireB: TransferSizes, supportsArithmetic: TransferSizes, supportsLogical: TransferSizes, supportsGet: TransferSizes, supportsPutFull: TransferSizes, supportsPutPartial: TransferSizes, supportsHint: TransferSizes) object ManagerUnificationKey { def apply(x: TLSlaveParameters): ManagerUnificationKey = ManagerUnificationKey( resources = x.resources, regionType = x.regionType, executable = x.executable, supportsAcquireT = x.supportsAcquireT, supportsAcquireB = x.supportsAcquireB, supportsArithmetic = x.supportsArithmetic, supportsLogical = x.supportsLogical, supportsGet = x.supportsGet, supportsPutFull = x.supportsPutFull, supportsPutPartial = x.supportsPutPartial, supportsHint = x.supportsHint) } object 
ManagerUnification { def apply(slaves: Seq[TLSlaveParameters]): List[TLSlaveParameters] = { slaves.groupBy(ManagerUnificationKey.apply).values.map { seq => val agree = seq.forall(_.fifoId == seq.head.fifoId) seq(0).v1copy( address = AddressSet.unify(seq.flatMap(_.address)), fifoId = if (agree) seq(0).fifoId else None) }.toList } } case class TLBufferParams( a: BufferParams = BufferParams.none, b: BufferParams = BufferParams.none, c: BufferParams = BufferParams.none, d: BufferParams = BufferParams.none, e: BufferParams = BufferParams.none ) extends DirectedBuffers[TLBufferParams] { def copyIn(x: BufferParams) = this.copy(b = x, d = x) def copyOut(x: BufferParams) = this.copy(a = x, c = x, e = x) def copyInOut(x: BufferParams) = this.copyIn(x).copyOut(x) } /** Pretty printing of TL source id maps */ class TLSourceIdMap(tl: TLMasterPortParameters) extends IdMap[TLSourceIdMapEntry] { private val tlDigits = String.valueOf(tl.endSourceId-1).length() protected val fmt = s"\t[%${tlDigits}d, %${tlDigits}d) %s%s%s" private val sorted = tl.masters.sortBy(_.sourceId) val mapping: Seq[TLSourceIdMapEntry] = sorted.map { case c => TLSourceIdMapEntry(c.sourceId, c.name, c.supports.probe, c.requestFifo) } } case class TLSourceIdMapEntry(tlId: IdRange, name: String, isCache: Boolean, requestFifo: Boolean) extends IdMapEntry { val from = tlId val to = tlId val maxTransactionsInFlight = Some(tlId.size) } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? 
val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => 
a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? 
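// To summarize the decode below: Get and a PREFETCH_READ Hint resolve to false.B;
// PutFullData, PutPartialData, ArithmeticData, LogicalData and a PREFETCH_WRITE Hint resolve
// to true.B; AcquireBlock and AcquirePerm depend on the grow parameter (NtoB -> false.B,
// NtoT and BtoT -> true.B). Any other opcode or param encoding falls through to DontCare.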
def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size 
:= lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = 
Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address 
:= toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def 
Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := 
mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
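For context, here is a minimal usage sketch of the edge message constructors above. It is illustrative only and not part of the files being translated: the object and helper names, the fixed source ID, the 4-byte (lgSize = 2) transfer size, and the assumption that only Get requests arrive at the manager side are all assumptions made for the example. The helpers are meant to be called from inside a module's elaboration context, given a negotiated (bundle, edge) pair from a diplomatic node.

import chisel3._
import freechips.rocketchip.tilelink.{TLBundle, TLEdgeIn, TLEdgeOut}

object TLEdgeUsageSketch {
  // Client side: build one 4-byte Get with edge.Get and only assert a.valid when
  // some visible manager can legally accept it (the returned `legal` Bool).
  def driveSingleGet(tl: TLBundle, edge: TLEdgeOut, doRead: Bool, addr: UInt): Unit = {
    val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 2.U)
    tl.a.valid := doRead && legal
    tl.a.bits  := get
    tl.d.ready := true.B // unconditionally sink the AccessAckData response
  }

  // Manager side: answer every A-channel beat with AccessAckData carrying a constant
  // payload, using the AccessAck(a, data) constructor shown above. This assumes the
  // only traffic is Gets; a real manager would decode a.bits.opcode first.
  def answerWithConstant(tl: TLBundle, edge: TLEdgeIn, payload: UInt): Unit = {
    tl.a.ready := tl.d.ready
    tl.d.valid := tl.a.valid
    tl.d.bits  := edge.AccessAck(tl.a.bits, payload)
  }
}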
module TLMonitor_15( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [28:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [28:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_param_0 = io_in_d_bits_param; // @[Monitor.scala:36:7] wire [3:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire io_in_d_bits_sink_0 = io_in_d_bits_sink; // @[Monitor.scala:36:7] wire io_in_d_bits_denied_0 = io_in_d_bits_denied; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt_0 = io_in_d_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_a_bits_source = 1'h0; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_source = 1'h0; // @[Monitor.scala:36:7] wire mask_sub_sub_sub_0_1 = 1'h0; // @[Misc.scala:206:21] wire mask_sub_size = 1'h0; // @[Misc.scala:209:26] wire _mask_sub_acc_T = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_1 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_2 = 1'h0; // @[Misc.scala:215:38] wire _mask_sub_acc_T_3 = 1'h0; // @[Misc.scala:215:38] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _a_first_beats1_opdata_T = 1'h0; // @[Edges.scala:92:37] wire _a_first_beats1_opdata_T_1 = 1'h0; // @[Edges.scala:92:37] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire 
c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire c_set = 1'h0; // @[Monitor.scala:738:34] wire c_set_wo_ready = 1'h0; // @[Monitor.scala:739:34] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_source = 
1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_source = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_source = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire io_in_d_ready = 1'h1; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h1; // 
@[Parameters.scala:46:9] wire _source_ok_WIRE_0 = 1'h1; // @[Parameters.scala:1138:31] wire mask_sub_sub_size = 1'h1; // @[Misc.scala:209:26] wire mask_size = 1'h1; // @[Misc.scala:209:26] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:46:9] wire _source_ok_WIRE_1_0 = 1'h1; // @[Parameters.scala:1138:31] wire a_first_beats1_opdata = 1'h1; // @[Edges.scala:92:28] wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_first_last = 1'h1; // @[Edges.scala:232:33] wire a_first_beats1_opdata_1 = 1'h1; // @[Edges.scala:92:28] wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire _same_cycle_resp_T_2 = 1'h1; // @[Monitor.scala:684:113] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire _same_cycle_resp_T_8 = 1'h1; // @[Monitor.scala:795:113] wire [8:0] a_first_beats1_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] a_first_beats1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] a_first_count = 9'h0; // @[Edges.scala:234:25] wire [8:0] a_first_beats1_decode_1 = 9'h0; // @[Edges.scala:220:59] wire [8:0] a_first_beats1_1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] a_first_count_1 = 9'h0; // @[Edges.scala:234:25] wire [8:0] c_first_beats1_decode = 9'h0; // @[Edges.scala:220:59] wire [8:0] c_first_beats1 = 9'h0; // @[Edges.scala:221:14] wire [8:0] _c_first_count_T = 9'h0; // @[Edges.scala:234:27] wire [8:0] c_first_count = 9'h0; // @[Edges.scala:234:25] wire [8:0] _c_first_counter_T = 9'h0; // @[Edges.scala:236:21] wire [8:0] c_first_counter1 = 9'h1FF; // @[Edges.scala:230:28] wire [9:0] _c_first_counter1_T = 10'h3FF; // @[Edges.scala:230:28] wire [3:0] io_in_a_bits_size = 4'h2; // @[Monitor.scala:36:7] wire [3:0] _mask_sizeOH_T = 4'h2; // @[Misc.scala:202:34] wire [2:0] io_in_a_bits_opcode = 3'h0; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param = 3'h0; // @[Monitor.scala:36:7] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] 
_c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [7:0] io_in_a_bits_mask = 8'hF; // @[Monitor.scala:36:7] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] 
_c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_first_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_first_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_wo_ready_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_wo_ready_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_interm_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_interm_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_opcodes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_opcodes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_sizes_set_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_sizes_set_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _c_probe_ack_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _c_probe_ack_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_1_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_2_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_3_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [28:0] _same_cycle_resp_WIRE_4_bits_address = 29'h0; // @[Bundles.scala:265:74] wire [28:0] _same_cycle_resp_WIRE_5_bits_address = 29'h0; // @[Bundles.scala:265:61] wire [3:0] _a_opcode_lookup_T = 4'h0; // @[Monitor.scala:637:69] wire 
[3:0] _a_size_lookup_T = 4'h0; // @[Monitor.scala:641:65] wire [3:0] _a_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:657:53] wire [3:0] _a_opcodes_set_T = 4'h0; // @[Monitor.scala:659:79] wire [3:0] _a_sizes_set_T = 4'h0; // @[Monitor.scala:660:77] wire [3:0] _d_opcodes_clr_T_4 = 4'h0; // @[Monitor.scala:680:101] wire [3:0] _d_sizes_clr_T_4 = 4'h0; // @[Monitor.scala:681:99] wire [3:0] _c_first_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_first_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_first_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] c_opcodes_set = 4'h0; // @[Monitor.scala:740:34] wire [3:0] _c_opcode_lookup_T = 4'h0; // @[Monitor.scala:749:69] wire [3:0] _c_size_lookup_T = 4'h0; // @[Monitor.scala:750:67] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_set_wo_ready_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_wo_ready_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_interm_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_opcodes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_opcodes_set_T = 4'h0; // @[Monitor.scala:767:79] wire [3:0] _c_sizes_set_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_sizes_set_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_sizes_set_T = 4'h0; // @[Monitor.scala:768:77] wire [3:0] _c_probe_ack_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _c_probe_ack_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _c_probe_ack_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _d_opcodes_clr_T_10 = 4'h0; // @[Monitor.scala:790:101] wire [3:0] _d_sizes_clr_T_10 = 4'h0; // @[Monitor.scala:791:99] wire [3:0] _same_cycle_resp_WIRE_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_1_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_2_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_3_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [3:0] _same_cycle_resp_WIRE_4_bits_size = 4'h0; // @[Bundles.scala:265:74] wire [3:0] _same_cycle_resp_WIRE_5_bits_size = 4'h0; // @[Bundles.scala:265:61] wire [30:0] _d_sizes_clr_T_5 = 31'hFF; // @[Monitor.scala:681:74] wire [30:0] _d_sizes_clr_T_11 = 31'hFF; // @[Monitor.scala:791:74] wire [15:0] _a_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hFF; // @[Monitor.scala:612:57] wire [15:0] _c_size_lookup_T_5 = 16'hFF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hFF; // @[Monitor.scala:724:57] wire [16:0] _a_size_lookup_T_4 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hFF; // @[Monitor.scala:612:57] wire [16:0] _c_size_lookup_T_4 = 17'hFF; // 
@[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hFF; // @[Monitor.scala:724:57] wire [15:0] _a_size_lookup_T_3 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h100; // @[Monitor.scala:612:51] wire [15:0] _c_size_lookup_T_3 = 16'h100; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h100; // @[Monitor.scala:724:51] wire [30:0] _d_opcodes_clr_T_5 = 31'hF; // @[Monitor.scala:680:76] wire [30:0] _d_opcodes_clr_T_11 = 31'hF; // @[Monitor.scala:790:76] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [3:0] _mask_sizeOH_T_1 = 4'h4; // @[OneHot.scala:65:12] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [1:0] _a_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _a_set_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_wo_ready_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _c_set_T = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_wo_ready_T_1 = 2'h1; // @[OneHot.scala:58:35] wire [1:0] _d_clr_T_1 = 2'h1; // @[OneHot.scala:58:35] wire [19:0] _c_sizes_set_T_1 = 20'h0; // @[Monitor.scala:768:52] wire [18:0] _c_opcodes_set_T_1 = 19'h0; // @[Monitor.scala:767:54] wire [4:0] _c_sizes_set_interm_T_1 = 5'h1; // @[Monitor.scala:766:59] wire [4:0] c_sizes_set_interm = 5'h0; // @[Monitor.scala:755:40] wire [4:0] _c_sizes_set_interm_T = 5'h0; // @[Monitor.scala:766:51] wire [3:0] _a_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:657:61] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [7:0] c_sizes_set = 8'h0; // @[Monitor.scala:741:34] wire [11:0] _c_first_beats1_decode_T_2 = 12'h0; // @[package.scala:243:46] wire [11:0] _c_first_beats1_decode_T_1 = 12'hFFF; // @[package.scala:243:76] wire [26:0] _c_first_beats1_decode_T = 27'hFFF; // @[package.scala:243:71] wire [4:0] _a_sizes_set_interm_T_1 = 5'h5; // @[Monitor.scala:658:59] wire [4:0] _a_sizes_set_interm_T = 5'h4; // @[Monitor.scala:658:51] wire [2:0] _mask_sizeOH_T_2 = 3'h4; // @[OneHot.scala:65:27] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] mask_sizeOH = 3'h5; // @[Misc.scala:202:81] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; 
// @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [11:0] is_aligned_mask = 12'h3; // @[package.scala:243:46] wire [11:0] _a_first_beats1_decode_T_2 = 12'h3; // @[package.scala:243:46] wire [11:0] _a_first_beats1_decode_T_5 = 12'h3; // @[package.scala:243:46] wire [11:0] _is_aligned_mask_T_1 = 12'hFFC; // @[package.scala:243:76] wire [11:0] _a_first_beats1_decode_T_1 = 12'hFFC; // @[package.scala:243:76] wire [11:0] _a_first_beats1_decode_T_4 = 12'hFFC; // @[package.scala:243:76] wire [26:0] _is_aligned_mask_T = 27'h3FFC; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T = 27'h3FFC; // @[package.scala:243:71] wire [26:0] _a_first_beats1_decode_T_3 = 27'h3FFC; // @[package.scala:243:71] wire [1:0] mask_sizeOH_shiftAmount = 2'h2; // @[OneHot.scala:64:49] wire [3:0] _a_size_lookup_T_2 = 4'h8; // @[Monitor.scala:641:117] wire [3:0] _d_sizes_clr_T = 4'h8; // @[Monitor.scala:681:48] wire [3:0] _c_size_lookup_T_2 = 4'h8; // @[Monitor.scala:750:119] wire [3:0] _d_sizes_clr_T_6 = 4'h8; // @[Monitor.scala:791:48] wire _d_first_T = io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T_1 = io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T_2 = io_in_d_valid_0; // @[Decoupled.scala:51:35] wire [28:0] _is_aligned_T = {27'h0, io_in_a_bits_address_0[1:0]}; // @[Monitor.scala:36:7] wire is_aligned = _is_aligned_T == 29'h0; // @[Edges.scala:21:{16,24}] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_0_2; // @[Misc.scala:214:27, :215:38] wire mask_sub_sub_0_1 = _mask_sub_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_0_1 = mask_sub_sub_0_1; // @[Misc.scala:215:29] wire mask_sub_1_1 = mask_sub_sub_0_1; // @[Misc.scala:215:29] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_1_2; // @[Misc.scala:214:27, :215:38] wire mask_sub_sub_1_1 = _mask_sub_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_1 = mask_sub_sub_1_1; // @[Misc.scala:215:29] wire mask_sub_3_1 = mask_sub_sub_1_1; // @[Misc.scala:215:29] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_eq; // @[Misc.scala:214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_eq_1; // @[Misc.scala:214:27, :215:38] wire mask_acc_1 = 
mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_eq_2; // @[Misc.scala:214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_eq_3; // @[Misc.scala:214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_eq_4; // @[Misc.scala:214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_eq_5; // @[Misc.scala:214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_eq_6; // @[Misc.scala:214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_eq_7; // @[Misc.scala:214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire _T_1201 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1201; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1201; // @[Decoupled.scala:51:35] wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35] reg [8:0] a_first_counter; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T = {1'h0, a_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1 = _a_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire [8:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] _a_first_counter_T = a_first ? 
9'h0 : a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [28:0] address; // @[Monitor.scala:391:22] wire [26:0] _GEN = 27'hFFF << io_in_d_bits_size_0; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [26:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN; // @[package.scala:243:71] wire [11:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[11:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [8:0] d_first_beats1 = d_first_beats1_opdata ? d_first_beats1_decode : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T = {1'h0, d_first_counter} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1 = _d_first_counter1_T[8:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [3:0] size_1; // @[Monitor.scala:540:22] reg sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] reg [1:0] inflight; // @[Monitor.scala:614:27] reg [3:0] inflight_opcodes; // @[Monitor.scala:616:35] wire [3:0] _a_opcode_lookup_T_1 = inflight_opcodes; // @[Monitor.scala:616:35, :637:44] reg [7:0] inflight_sizes; // @[Monitor.scala:618:33] wire [7:0] _a_size_lookup_T_1 = inflight_sizes; // @[Monitor.scala:618:33, :641:40] wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35] reg [8:0] a_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] a_first_counter1_1 = _a_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire [8:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] _a_first_counter_T_1 = a_first_1 ? 
9'h0 : a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire [11:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? d_first_beats1_decode_1 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_1; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_1 = _d_first_counter1_T_1[8:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire a_set; // @[Monitor.scala:626:34] wire a_set_wo_ready; // @[Monitor.scala:627:34] wire [3:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [7:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [15:0] _a_opcode_lookup_T_6 = {12'h0, _a_opcode_lookup_T_1}; // @[Monitor.scala:637:{44,97}] wire [15:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [7:0] a_size_lookup; // @[Monitor.scala:639:33] wire [15:0] _a_size_lookup_T_6 = {8'h0, _a_size_lookup_T_1}; // @[Monitor.scala:641:{40,91}] wire [15:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[15:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[7:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [4:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _T_1124 = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26] assign a_set_wo_ready = _T_1124; // @[Monitor.scala:627:34, :651:26] wire _same_cycle_resp_T; // @[Monitor.scala:684:44] assign _same_cycle_resp_T = _T_1124; // @[Monitor.scala:651:26, :684:44] assign a_set = _T_1201 & a_first_1; // @[Decoupled.scala:51:35] assign a_opcodes_set_interm = {3'h0, a_set}; // @[Monitor.scala:626:34, :646:40, :655:70, :657:28] assign a_sizes_set_interm = a_set ? 5'h5 : 5'h0; // @[Monitor.scala:626:34, :648:38, :655:70, :658:28] wire [18:0] _a_opcodes_set_T_1 = {15'h0, a_opcodes_set_interm}; // @[package.scala:243:71] assign a_opcodes_set = a_set ? _a_opcodes_set_T_1[3:0] : 4'h0; // @[Monitor.scala:626:34, :630:33, :655:70, :659:{28,54}] wire [19:0] _a_sizes_set_T_1 = {15'h0, a_sizes_set_interm}; // @[package.scala:243:71] assign a_sizes_set = a_set ? 
_a_sizes_set_T_1[7:0] : 8'h0; // @[Monitor.scala:626:34, :632:31, :655:70, :660:{28,52}] wire d_clr; // @[Monitor.scala:664:34] wire d_clr_wo_ready; // @[Monitor.scala:665:34] wire [3:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [7:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_0 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_0; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_0; // @[Monitor.scala:673:46, :783:46] wire _T_1173 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] assign d_clr_wo_ready = _T_1173 & ~d_release_ack; // @[Monitor.scala:665:34, :673:46, :674:{26,71,74}] assign d_clr = io_in_d_valid_0 & d_first_1 & ~d_release_ack; // @[Monitor.scala:36:7, :664:34, :673:46, :674:74, :678:{25,70}] assign d_opcodes_clr = {4{d_clr}}; // @[Monitor.scala:664:34, :668:33, :678:89, :680:21] assign d_sizes_clr = {8{d_clr}}; // @[Monitor.scala:664:34, :670:31, :678:89, :681:21] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire same_cycle_resp = _same_cycle_resp_T_1; // @[Monitor.scala:684:{55,88}] wire [1:0] _inflight_T = {inflight[1], inflight[0] | a_set}; // @[Monitor.scala:614:27, :626:34, :705:27] wire _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [1:0] _inflight_T_2 = {1'h0, _inflight_T[0] & _inflight_T_1}; // @[Monitor.scala:705:{27,36,38}] wire [3:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [3:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [3:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [7:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [7:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [7:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [1:0] inflight_1; // @[Monitor.scala:726:35] wire [1:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [3:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [3:0] _c_opcode_lookup_T_1 = inflight_opcodes_1; // @[Monitor.scala:727:35, :749:44] wire [3:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [7:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [7:0] _c_size_lookup_T_1 = inflight_sizes_1; // @[Monitor.scala:728:35, :750:42] wire [7:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [11:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[11:0]; // @[package.scala:243:{71,76}] wire [11:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [8:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[11:3]; // @[package.scala:243:46] wire [8:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 9'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [8:0] d_first_counter_2; // @[Edges.scala:229:27] wire [9:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 10'h1; // @[Edges.scala:229:27, :230:28] wire [8:0] d_first_counter1_2 = _d_first_counter1_T_2[8:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 9'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 9'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 9'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [8:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [8:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [8:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [7:0] c_size_lookup; // @[Monitor.scala:748:35] wire [15:0] _c_opcode_lookup_T_6 = {12'h0, _c_opcode_lookup_T_1}; // @[Monitor.scala:749:{44,97}] wire [15:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[15:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [15:0] _c_size_lookup_T_6 = {8'h0, _c_size_lookup_T_1}; // @[Monitor.scala:750:{42,93}] wire [15:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[15:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[7:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire d_clr_1; // @[Monitor.scala:774:34] wire d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [3:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [7:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1245 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1245 & d_release_ack_1; // @[Monitor.scala:775:34, :783:46, :784:{26,71}] assign d_clr_1 = io_in_d_valid_0 & d_first_2 & d_release_ack_1; // @[Monitor.scala:36:7, :774:34, :783:46, :788:{25,70}] assign d_opcodes_clr_1 = {4{d_clr_1}}; // @[Monitor.scala:774:34, :776:34, :788:88, :790:21] assign d_sizes_clr_1 = {8{d_clr_1}}; // @[Monitor.scala:774:34, :777:34, :788:88, :791:21] wire _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [1:0] _inflight_T_5 = {1'h0, _inflight_T_3[0] & _inflight_T_4}; // @[Monitor.scala:814:{35,44,46}] wire [3:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [3:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [7:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [7:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
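The a_first/d_first counter logic that recurs throughout the monitor above is the elaborated form of the TileLink edge's beat-tracking helpers. As a point of reference only (this module is not part of the generated code; its name, port names, and parameterization are illustrative), the same first/last counter pattern can be written stand-alone as follows.

import chisel3._
import chisel3.util._

// Tracks burst position on a channel: `first` is high on the first beat of a burst,
// `last` on its final beat. Mirrors the counter/counter1 structure seen in the
// d_first_* wires above: the counter reloads with beats1 on the first beat and
// decrements on every subsequent accepted beat.
class BeatTracker(maxBeats: Int) extends Module {
  require(maxBeats > 1)
  val io = IO(new Bundle {
    val fire   = Input(Bool())                       // ready && valid on the channel
    val beats1 = Input(UInt(log2Ceil(maxBeats).W))   // beats in this burst, minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Ceil(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  when (io.fire) {
    counter := Mux(io.first, io.beats1, counter1)
  }
}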
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File package.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. 
*/ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class 
OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ => throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Replacement.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
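// Illustrative usage sketch (not part of the original sources; `hit` and
// `hit_way` are hypothetical signals): a per-set replacement policy from this
// file is typically driven as
//   val plru = new PseudoLRU(4)
//   when (hit) { plru.access(hit_way) } // mark the touched way as most recent
//   val victim = plru.way               // way to evict on a miss
// See the ReplacementPolicy abstract class below for the full interface.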
package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import freechips.rocketchip.util.property.cover abstract class ReplacementPolicy { def nBits: Int def perSet: Boolean def way: UInt def miss: Unit def hit: Unit def access(touch_way: UInt): Unit def access(touch_ways: Seq[Valid[UInt]]): Unit def state_read: UInt def get_next_state(state: UInt, touch_way: UInt): UInt def get_next_state(state: UInt, touch_ways: Seq[Valid[UInt]]): UInt = { touch_ways.foldLeft(state)((prev, touch_way) => Mux(touch_way.valid, get_next_state(prev, touch_way.bits), prev)) } def get_replace_way(state: UInt): UInt } object ReplacementPolicy { def fromString(s: String, n_ways: Int): ReplacementPolicy = s.toLowerCase match { case "random" => new RandomReplacement(n_ways) case "lru" => new TrueLRU(n_ways) case "plru" => new PseudoLRU(n_ways) case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t") } } class RandomReplacement(n_ways: Int) extends ReplacementPolicy { private val replace = Wire(Bool()) replace := false.B def nBits = 16 def perSet = false private val lfsr = LFSR(nBits, replace) def state_read = WireDefault(lfsr) def way = Random(n_ways, lfsr) def miss = replace := true.B def hit = {} def access(touch_way: UInt) = {} def access(touch_ways: Seq[Valid[UInt]]) = {} def get_next_state(state: UInt, touch_way: UInt) = 0.U //DontCare def get_replace_way(state: UInt) = way } abstract class SeqReplacementPolicy { def access(set: UInt): Unit def update(valid: Bool, hit: Bool, set: UInt, way: UInt): Unit def way: UInt } abstract class SetAssocReplacementPolicy { def access(set: UInt, touch_way: UInt): Unit def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]): Unit def way(set: UInt): UInt } class SeqRandom(n_ways: Int) extends SeqReplacementPolicy { val logic = new RandomReplacement(n_ways) def access(set: UInt) = { } def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = { when (valid && !hit) { logic.miss } } def way = logic.way } class TrueLRU(n_ways: Int) extends ReplacementPolicy { // True LRU replacement policy, using a triangular matrix to track which sets are more recently used than others. // The matrix is packed into a single UInt (or Bits). 
Example 4-way (6-bits): // [5] - 3 more recent than 2 // [4] - 3 more recent than 1 // [3] - 2 more recent than 1 // [2] - 3 more recent than 0 // [1] - 2 more recent than 0 // [0] - 1 more recent than 0 def nBits = (n_ways * (n_ways-1)) / 2 def perSet = true private val state_reg = RegInit(0.U(nBits.W)) def state_read = WireDefault(state_reg) private def extractMRUVec(state: UInt): Seq[UInt] = { // Extract per-way information about which higher-indexed ways are more recently used val moreRecentVec = Wire(Vec(n_ways-1, UInt(n_ways.W))) var lsb = 0 for (i <- 0 until n_ways-1) { moreRecentVec(i) := Cat(state(lsb+n_ways-i-2,lsb), 0.U((i+1).W)) lsb = lsb + (n_ways - i - 1) } moreRecentVec } def get_next_state(state: UInt, touch_way: UInt): UInt = { val nextState = Wire(Vec(n_ways-1, UInt(n_ways.W))) val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix val wayDec = UIntToOH(touch_way, n_ways) // Compute next value of triangular matrix // set the touched way as more recent than every other way nextState.zipWithIndex.map { case (e, i) => e := Mux(i.U === touch_way, 0.U(n_ways.W), moreRecentVec(i) | wayDec) } nextState.zipWithIndex.tail.foldLeft((nextState.head.apply(n_ways-1,1),0)) { case ((pe,pi),(ce,ci)) => (Cat(ce.apply(n_ways-1,ci+1), pe), ci) }._1 } def access(touch_way: UInt): Unit = { state_reg := get_next_state(state_reg, touch_way) } def access(touch_ways: Seq[Valid[UInt]]): Unit = { when (touch_ways.map(_.valid).orR) { state_reg := get_next_state(state_reg, touch_ways) } for (i <- 1 until touch_ways.size) { cover(PopCount(touch_ways.map(_.valid)) === i.U, s"LRU_UpdateCount$i", s"LRU Update $i simultaneous") } } def get_replace_way(state: UInt): UInt = { val moreRecentVec = extractMRUVec(state) // reconstruct lower triangular matrix // For each way, determine if all other ways are more recent val mruWayDec = (0 until n_ways).map { i => val upperMoreRecent = (if (i == n_ways-1) true.B else moreRecentVec(i).apply(n_ways-1,i+1).andR) val lowerMoreRecent = (if (i == 0) true.B else moreRecentVec.map(e => !e(i)).reduce(_ && _)) upperMoreRecent && lowerMoreRecent } OHToUInt(mruWayDec) } def way = get_replace_way(state_reg) def miss = access(way) def hit = {} @deprecated("replace 'replace' with 'way' from abstract class ReplacementPolicy","Rocket Chip 2020.05") def replace: UInt = way } class PseudoLRU(n_ways: Int) extends ReplacementPolicy { // Pseudo-LRU tree algorithm: https://en.wikipedia.org/wiki/Pseudo-LRU#Tree-PLRU // // // - bits storage example for 4-way PLRU binary tree: // bit[2]: ways 3+2 older than ways 1+0 // / \ // bit[1]: way 3 older than way 2 bit[0]: way 1 older than way 0 // // // - bits storage example for 3-way PLRU binary tree: // bit[1]: way 2 older than ways 1+0 // \ // bit[0]: way 1 older than way 0 // // // - bits storage example for 8-way PLRU binary tree: // bit[6]: ways 7-4 older than ways 3-0 // / \ // bit[5]: ways 7+6 > 5+4 bit[2]: ways 3+2 > 1+0 // / \ / \ // bit[4]: way 7>6 bit[3]: way 5>4 bit[1]: way 3>2 bit[0]: way 1>0 def nBits = n_ways - 1 def perSet = true private val state_reg = if (nBits == 0) Reg(UInt(0.W)) else RegInit(0.U(nBits.W)) def state_read = WireDefault(state_reg) def access(touch_way: UInt): Unit = { state_reg := get_next_state(state_reg, touch_way) } def access(touch_ways: Seq[Valid[UInt]]): Unit = { when (touch_ways.map(_.valid).orR) { state_reg := get_next_state(state_reg, touch_ways) } for (i <- 1 until touch_ways.size) { cover(PopCount(touch_ways.map(_.valid)) === i.U, s"PLRU_UpdateCount$i", s"PLRU Update $i 
simultaneous") } } /** @param state state_reg bits for this sub-tree * @param touch_way touched way encoded value bits for this sub-tree * @param tree_nways number of ways in this sub-tree */ def get_next_state(state: UInt, touch_way: UInt, tree_nways: Int): UInt = { require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways") require(touch_way.getWidth == (log2Ceil(tree_nways) max 1), s"wrong encoded way width ${touch_way.getWidth} for $tree_nways ways") if (tree_nways > 2) { // we are at a branching node in the tree, so recurse val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree val set_left_older = !touch_way(log2Ceil(tree_nways)-1) val left_subtree_state = state.extract(tree_nways-3, right_nways-1) val right_subtree_state = state(right_nways-2, 0) if (left_nways > 1) { // we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees Cat(set_left_older, Mux(set_left_older, left_subtree_state, // if setting left sub-tree as older, do NOT recurse into left sub-tree get_next_state(left_subtree_state, touch_way.extract(log2Ceil(left_nways)-1,0), left_nways)), // recurse left if newer Mux(set_left_older, get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree } else { // we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree Cat(set_left_older, Mux(set_left_older, get_next_state(right_subtree_state, touch_way(log2Ceil(right_nways)-1,0), right_nways), // recurse right if newer right_subtree_state)) // if setting right sub-tree as older, do NOT recurse into right sub-tree } } else if (tree_nways == 2) { // we are at a leaf node at the end of the tree, so set the single state bit opposite of the lsb of the touched way encoded value !touch_way(0) } else { // tree_nways <= 1 // we are at an empty node in an empty tree for 1 way, so return single zero bit for Chisel (no zero-width wires) 0.U(1.W) } } def get_next_state(state: UInt, touch_way: UInt): UInt = { val touch_way_sized = if (touch_way.getWidth < log2Ceil(n_ways)) touch_way.padTo (log2Ceil(n_ways)) else touch_way.extract(log2Ceil(n_ways)-1,0) get_next_state(state, touch_way_sized, n_ways) } /** @param state state_reg bits for this sub-tree * @param tree_nways number of ways in this sub-tree */ def get_replace_way(state: UInt, tree_nways: Int): UInt = { require(state.getWidth == (tree_nways-1), s"wrong state bits width ${state.getWidth} for $tree_nways ways") // this algorithm recursively descends the binary tree, filling in the way-to-replace encoded value from msb to lsb if (tree_nways > 2) { // we are at a branching node in the tree, so recurse val right_nways: Int = 1 << (log2Ceil(tree_nways) - 1) // number of ways in the right sub-tree val left_nways: Int = tree_nways - right_nways // number of ways in the left sub-tree val left_subtree_older = state(tree_nways-2) val left_subtree_state = state.extract(tree_nways-3, right_nways-1) val right_subtree_state = state(right_nways-2, 0) if (left_nways > 1) { // we are at a branching node in the tree with both left and right sub-trees, so recurse both sub-trees Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value 
Mux(left_subtree_older, // if left sub-tree is older, recurse left, else recurse right get_replace_way(left_subtree_state, left_nways), // recurse left get_replace_way(right_subtree_state, right_nways))) // recurse right } else { // we are at a branching node in the tree with only a right sub-tree, so recurse only right sub-tree Cat(left_subtree_older, // return the top state bit (current tree node) as msb of the way-to-replace encoded value Mux(left_subtree_older, // if left sub-tree is older, return and do not recurse right 0.U(1.W), get_replace_way(right_subtree_state, right_nways))) // recurse right } } else if (tree_nways == 2) { // we are at a leaf node at the end of the tree, so just return the single state bit as lsb of the way-to-replace encoded value state(0) } else { // tree_nways <= 1 // we are at an empty node in an unbalanced tree for non-power-of-2 ways, so return single zero bit as lsb of the way-to-replace encoded value 0.U(1.W) } } def get_replace_way(state: UInt): UInt = get_replace_way(state, n_ways) def way = get_replace_way(state_reg) def miss = access(way) def hit = {} } class SeqPLRU(n_sets: Int, n_ways: Int) extends SeqReplacementPolicy { val logic = new PseudoLRU(n_ways) val state = SyncReadMem(n_sets, UInt(logic.nBits.W)) val current_state = Wire(UInt(logic.nBits.W)) val next_state = Wire(UInt(logic.nBits.W)) val plru_way = logic.get_replace_way(current_state) def access(set: UInt) = { current_state := state.read(set) } def update(valid: Bool, hit: Bool, set: UInt, way: UInt) = { val update_way = Mux(hit, way, plru_way) next_state := logic.get_next_state(current_state, update_way) when (valid) { state.write(set, next_state) } } def way = plru_way } class SetAssocLRU(n_sets: Int, n_ways: Int, policy: String) extends SetAssocReplacementPolicy { val logic = policy.toLowerCase match { case "plru" => new PseudoLRU(n_ways) case "lru" => new TrueLRU(n_ways) case t => throw new IllegalArgumentException(s"unknown Replacement Policy type $t") } val state_vec = if (logic.nBits == 0) Reg(Vec(n_sets, UInt(logic.nBits.W))) // Work around elaboration error on following line else RegInit(VecInit(Seq.fill(n_sets)(0.U(logic.nBits.W)))) def access(set: UInt, touch_way: UInt) = { state_vec(set) := logic.get_next_state(state_vec(set), touch_way) } def access(sets: Seq[UInt], touch_ways: Seq[Valid[UInt]]) = { require(sets.size == touch_ways.size, "internal consistency check: should be same number of simultaneous updates for sets and touch_ways") for (set <- 0 until n_sets) { val set_touch_ways = (sets zip touch_ways).map { case (touch_set, touch_way) => Pipe(touch_way.valid && (touch_set === set.U), touch_way.bits, 0)} when (set_touch_ways.map(_.valid).orR) { state_vec(set) := logic.get_next_state(state_vec(set), set_touch_ways) } } } def way(set: UInt) = logic.get_replace_way(state_vec(set)) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class PLRUTest(n_ways: Int, timeout: Int = 500) extends UnitTest(timeout) { val plru = new PseudoLRU(n_ways) // step io.finished := RegNext(true.B, false.B) val get_replace_ways = (0 until (1 << (n_ways-1))).map(state => plru.get_replace_way(state = state.U((n_ways-1).W))) val get_next_states = (0 until (1 << (n_ways-1))).map(state => (0 until n_ways).map(way => plru.get_next_state (state = state.U((n_ways-1).W), touch_way = way.U(log2Ceil(n_ways).W)))) n_ways match { case 2 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) 
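// For 2 ways, nBits = 1: the single state bit directly names the way to
// replace, and touching way w sets the state to !w (see get_next_state above).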
assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_next_states(0)(0) === 1.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=1 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 0.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=0 actual=%d", get_next_states(0)(1)) assert(get_next_states(1)(0) === 1.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=1 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 0.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=0 actual=%d", get_next_states(1)(1)) } case 3 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_replace_ways(2) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=2 actual=%d", get_replace_ways(2)) assert(get_replace_ways(3) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=2 actual=%d", get_replace_ways(3)) assert(get_next_states(0)(0) === 3.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=3 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 2.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=2 actual=%d", get_next_states(0)(1)) assert(get_next_states(0)(2) === 0.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=0 actual=%d", get_next_states(0)(2)) assert(get_next_states(1)(0) === 3.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=3 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 2.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=2 actual=%d", get_next_states(1)(1)) assert(get_next_states(1)(2) === 1.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=1 actual=%d", get_next_states(1)(2)) assert(get_next_states(2)(0) === 3.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=3 actual=%d", get_next_states(2)(0)) assert(get_next_states(2)(1) === 2.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=2 actual=%d", get_next_states(2)(1)) assert(get_next_states(2)(2) === 0.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=0 actual=%d", get_next_states(2)(2)) assert(get_next_states(3)(0) === 3.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=3 actual=%d", get_next_states(3)(0)) assert(get_next_states(3)(1) === 2.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=2 actual=%d", get_next_states(3)(1)) assert(get_next_states(3)(2) === 1.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=1 actual=%d", get_next_states(3)(2)) } case 4 => { assert(get_replace_ways(0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=0: expected=0 actual=%d", get_replace_ways(0)) assert(get_replace_ways(1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=1: expected=1 actual=%d", get_replace_ways(1)) assert(get_replace_ways(2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=2: expected=0 actual=%d", get_replace_ways(2)) assert(get_replace_ways(3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=3: expected=1 actual=%d", get_replace_ways(3)) assert(get_replace_ways(4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=4: expected=2 actual=%d", get_replace_ways(4)) assert(get_replace_ways(5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=5: expected=2 actual=%d", get_replace_ways(5)) assert(get_replace_ways(6) === 
3.U(log2Ceil(n_ways).W), s"get_replace_way state=6: expected=3 actual=%d", get_replace_ways(6)) assert(get_replace_ways(7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=7: expected=3 actual=%d", get_replace_ways(7)) assert(get_next_states(0)(0) === 5.U(plru.nBits.W), s"get_next_state state=0 way=0: expected=5 actual=%d", get_next_states(0)(0)) assert(get_next_states(0)(1) === 4.U(plru.nBits.W), s"get_next_state state=0 way=1: expected=4 actual=%d", get_next_states(0)(1)) assert(get_next_states(0)(2) === 2.U(plru.nBits.W), s"get_next_state state=0 way=2: expected=2 actual=%d", get_next_states(0)(2)) assert(get_next_states(0)(3) === 0.U(plru.nBits.W), s"get_next_state state=0 way=3: expected=0 actual=%d", get_next_states(0)(3)) assert(get_next_states(1)(0) === 5.U(plru.nBits.W), s"get_next_state state=1 way=0: expected=5 actual=%d", get_next_states(1)(0)) assert(get_next_states(1)(1) === 4.U(plru.nBits.W), s"get_next_state state=1 way=1: expected=4 actual=%d", get_next_states(1)(1)) assert(get_next_states(1)(2) === 3.U(plru.nBits.W), s"get_next_state state=1 way=2: expected=3 actual=%d", get_next_states(1)(2)) assert(get_next_states(1)(3) === 1.U(plru.nBits.W), s"get_next_state state=1 way=3: expected=1 actual=%d", get_next_states(1)(3)) assert(get_next_states(2)(0) === 7.U(plru.nBits.W), s"get_next_state state=2 way=0: expected=7 actual=%d", get_next_states(2)(0)) assert(get_next_states(2)(1) === 6.U(plru.nBits.W), s"get_next_state state=2 way=1: expected=6 actual=%d", get_next_states(2)(1)) assert(get_next_states(2)(2) === 2.U(plru.nBits.W), s"get_next_state state=2 way=2: expected=2 actual=%d", get_next_states(2)(2)) assert(get_next_states(2)(3) === 0.U(plru.nBits.W), s"get_next_state state=2 way=3: expected=0 actual=%d", get_next_states(2)(3)) assert(get_next_states(3)(0) === 7.U(plru.nBits.W), s"get_next_state state=3 way=0: expected=7 actual=%d", get_next_states(3)(0)) assert(get_next_states(3)(1) === 6.U(plru.nBits.W), s"get_next_state state=3 way=1: expected=6 actual=%d", get_next_states(3)(1)) assert(get_next_states(3)(2) === 3.U(plru.nBits.W), s"get_next_state state=3 way=2: expected=3 actual=%d", get_next_states(3)(2)) assert(get_next_states(3)(3) === 1.U(plru.nBits.W), s"get_next_state state=3 way=3: expected=1 actual=%d", get_next_states(3)(3)) assert(get_next_states(4)(0) === 5.U(plru.nBits.W), s"get_next_state state=4 way=0: expected=5 actual=%d", get_next_states(4)(0)) assert(get_next_states(4)(1) === 4.U(plru.nBits.W), s"get_next_state state=4 way=1: expected=4 actual=%d", get_next_states(4)(1)) assert(get_next_states(4)(2) === 2.U(plru.nBits.W), s"get_next_state state=4 way=2: expected=2 actual=%d", get_next_states(4)(2)) assert(get_next_states(4)(3) === 0.U(plru.nBits.W), s"get_next_state state=4 way=3: expected=0 actual=%d", get_next_states(4)(3)) assert(get_next_states(5)(0) === 5.U(plru.nBits.W), s"get_next_state state=5 way=0: expected=5 actual=%d", get_next_states(5)(0)) assert(get_next_states(5)(1) === 4.U(plru.nBits.W), s"get_next_state state=5 way=1: expected=4 actual=%d", get_next_states(5)(1)) assert(get_next_states(5)(2) === 3.U(plru.nBits.W), s"get_next_state state=5 way=2: expected=3 actual=%d", get_next_states(5)(2)) assert(get_next_states(5)(3) === 1.U(plru.nBits.W), s"get_next_state state=5 way=3: expected=1 actual=%d", get_next_states(5)(3)) assert(get_next_states(6)(0) === 7.U(plru.nBits.W), s"get_next_state state=6 way=0: expected=7 actual=%d", get_next_states(6)(0)) assert(get_next_states(6)(1) === 6.U(plru.nBits.W), s"get_next_state state=6 
way=1: expected=6 actual=%d", get_next_states(6)(1)) assert(get_next_states(6)(2) === 2.U(plru.nBits.W), s"get_next_state state=6 way=2: expected=2 actual=%d", get_next_states(6)(2)) assert(get_next_states(6)(3) === 0.U(plru.nBits.W), s"get_next_state state=6 way=3: expected=0 actual=%d", get_next_states(6)(3)) assert(get_next_states(7)(0) === 7.U(plru.nBits.W), s"get_next_state state=7 way=0: expected=7 actual=%d", get_next_states(7)(0)) assert(get_next_states(7)(1) === 6.U(plru.nBits.W), s"get_next_state state=7 way=5: expected=6 actual=%d", get_next_states(7)(1)) assert(get_next_states(7)(2) === 3.U(plru.nBits.W), s"get_next_state state=7 way=2: expected=3 actual=%d", get_next_states(7)(2)) assert(get_next_states(7)(3) === 1.U(plru.nBits.W), s"get_next_state state=7 way=3: expected=1 actual=%d", get_next_states(7)(3)) } case 5 => { assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0)) assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1)) assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2)) assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", get_replace_ways( 3)) assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4)) assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5)) assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6)) assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7)) assert(get_replace_ways( 8) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=4 actual=%d", get_replace_ways( 8)) assert(get_replace_ways( 9) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=4 actual=%d", get_replace_ways( 9)) assert(get_replace_ways(10) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=4 actual=%d", get_replace_ways(10)) assert(get_replace_ways(11) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=4 actual=%d", get_replace_ways(11)) assert(get_replace_ways(12) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=4 actual=%d", get_replace_ways(12)) assert(get_replace_ways(13) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=4 actual=%d", get_replace_ways(13)) assert(get_replace_ways(14) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=4 actual=%d", get_replace_ways(14)) assert(get_replace_ways(15) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=4 actual=%d", get_replace_ways(15)) assert(get_next_states( 0)(0) === 13.U(plru.nBits.W), s"get_next_state state=00 way=0: expected=13 actual=%d", get_next_states( 0)(0)) assert(get_next_states( 0)(1) === 12.U(plru.nBits.W), s"get_next_state state=00 way=1: expected=12 actual=%d", get_next_states( 0)(1)) assert(get_next_states( 0)(2) === 10.U(plru.nBits.W), s"get_next_state state=00 way=2: expected=10 actual=%d", get_next_states( 0)(2)) assert(get_next_states( 0)(3) === 8.U(plru.nBits.W), s"get_next_state state=00 way=3: expected=08 actual=%d", get_next_states( 0)(3)) assert(get_next_states( 0)(4) === 0.U(plru.nBits.W), s"get_next_state state=00 way=4: 
expected=00 actual=%d", get_next_states( 0)(4)) assert(get_next_states( 1)(0) === 13.U(plru.nBits.W), s"get_next_state state=01 way=0: expected=13 actual=%d", get_next_states( 1)(0)) assert(get_next_states( 1)(1) === 12.U(plru.nBits.W), s"get_next_state state=01 way=1: expected=12 actual=%d", get_next_states( 1)(1)) assert(get_next_states( 1)(2) === 11.U(plru.nBits.W), s"get_next_state state=01 way=2: expected=11 actual=%d", get_next_states( 1)(2)) assert(get_next_states( 1)(3) === 9.U(plru.nBits.W), s"get_next_state state=01 way=3: expected=09 actual=%d", get_next_states( 1)(3)) assert(get_next_states( 1)(4) === 1.U(plru.nBits.W), s"get_next_state state=01 way=4: expected=01 actual=%d", get_next_states( 1)(4)) assert(get_next_states( 2)(0) === 15.U(plru.nBits.W), s"get_next_state state=02 way=0: expected=15 actual=%d", get_next_states( 2)(0)) assert(get_next_states( 2)(1) === 14.U(plru.nBits.W), s"get_next_state state=02 way=1: expected=14 actual=%d", get_next_states( 2)(1)) assert(get_next_states( 2)(2) === 10.U(plru.nBits.W), s"get_next_state state=02 way=2: expected=10 actual=%d", get_next_states( 2)(2)) assert(get_next_states( 2)(3) === 8.U(plru.nBits.W), s"get_next_state state=02 way=3: expected=08 actual=%d", get_next_states( 2)(3)) assert(get_next_states( 2)(4) === 2.U(plru.nBits.W), s"get_next_state state=02 way=4: expected=02 actual=%d", get_next_states( 2)(4)) assert(get_next_states( 3)(0) === 15.U(plru.nBits.W), s"get_next_state state=03 way=0: expected=15 actual=%d", get_next_states( 3)(0)) assert(get_next_states( 3)(1) === 14.U(plru.nBits.W), s"get_next_state state=03 way=1: expected=14 actual=%d", get_next_states( 3)(1)) assert(get_next_states( 3)(2) === 11.U(plru.nBits.W), s"get_next_state state=03 way=2: expected=11 actual=%d", get_next_states( 3)(2)) assert(get_next_states( 3)(3) === 9.U(plru.nBits.W), s"get_next_state state=03 way=3: expected=09 actual=%d", get_next_states( 3)(3)) assert(get_next_states( 3)(4) === 3.U(plru.nBits.W), s"get_next_state state=03 way=4: expected=03 actual=%d", get_next_states( 3)(4)) assert(get_next_states( 4)(0) === 13.U(plru.nBits.W), s"get_next_state state=04 way=0: expected=13 actual=%d", get_next_states( 4)(0)) assert(get_next_states( 4)(1) === 12.U(plru.nBits.W), s"get_next_state state=04 way=1: expected=12 actual=%d", get_next_states( 4)(1)) assert(get_next_states( 4)(2) === 10.U(plru.nBits.W), s"get_next_state state=04 way=2: expected=10 actual=%d", get_next_states( 4)(2)) assert(get_next_states( 4)(3) === 8.U(plru.nBits.W), s"get_next_state state=04 way=3: expected=08 actual=%d", get_next_states( 4)(3)) assert(get_next_states( 4)(4) === 4.U(plru.nBits.W), s"get_next_state state=04 way=4: expected=04 actual=%d", get_next_states( 4)(4)) assert(get_next_states( 5)(0) === 13.U(plru.nBits.W), s"get_next_state state=05 way=0: expected=13 actual=%d", get_next_states( 5)(0)) assert(get_next_states( 5)(1) === 12.U(plru.nBits.W), s"get_next_state state=05 way=1: expected=12 actual=%d", get_next_states( 5)(1)) assert(get_next_states( 5)(2) === 11.U(plru.nBits.W), s"get_next_state state=05 way=2: expected=11 actual=%d", get_next_states( 5)(2)) assert(get_next_states( 5)(3) === 9.U(plru.nBits.W), s"get_next_state state=05 way=3: expected=09 actual=%d", get_next_states( 5)(3)) assert(get_next_states( 5)(4) === 5.U(plru.nBits.W), s"get_next_state state=05 way=4: expected=05 actual=%d", get_next_states( 5)(4)) assert(get_next_states( 6)(0) === 15.U(plru.nBits.W), s"get_next_state state=06 way=0: expected=15 actual=%d", get_next_states( 6)(0)) 
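// 5-way PLRU state layout (following the tree scheme documented above):
// bit[3]: way 4 older than ways 3-0, bit[2]: ways 3+2 older than ways 1+0,
// bit[1]: way 3 older than way 2, bit[0]: way 1 older than way 0; hence every
// state 8-15 (bit[3] set) replaces way 4, as the checks above verify.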
assert(get_next_states( 6)(1) === 14.U(plru.nBits.W), s"get_next_state state=06 way=1: expected=14 actual=%d", get_next_states( 6)(1)) assert(get_next_states( 6)(2) === 10.U(plru.nBits.W), s"get_next_state state=06 way=2: expected=10 actual=%d", get_next_states( 6)(2)) assert(get_next_states( 6)(3) === 8.U(plru.nBits.W), s"get_next_state state=06 way=3: expected=08 actual=%d", get_next_states( 6)(3)) assert(get_next_states( 6)(4) === 6.U(plru.nBits.W), s"get_next_state state=06 way=4: expected=06 actual=%d", get_next_states( 6)(4)) assert(get_next_states( 7)(0) === 15.U(plru.nBits.W), s"get_next_state state=07 way=0: expected=15 actual=%d", get_next_states( 7)(0)) assert(get_next_states( 7)(1) === 14.U(plru.nBits.W), s"get_next_state state=07 way=5: expected=14 actual=%d", get_next_states( 7)(1)) assert(get_next_states( 7)(2) === 11.U(plru.nBits.W), s"get_next_state state=07 way=2: expected=11 actual=%d", get_next_states( 7)(2)) assert(get_next_states( 7)(3) === 9.U(plru.nBits.W), s"get_next_state state=07 way=3: expected=09 actual=%d", get_next_states( 7)(3)) assert(get_next_states( 7)(4) === 7.U(plru.nBits.W), s"get_next_state state=07 way=4: expected=07 actual=%d", get_next_states( 7)(4)) assert(get_next_states( 8)(0) === 13.U(plru.nBits.W), s"get_next_state state=08 way=0: expected=13 actual=%d", get_next_states( 8)(0)) assert(get_next_states( 8)(1) === 12.U(plru.nBits.W), s"get_next_state state=08 way=1: expected=12 actual=%d", get_next_states( 8)(1)) assert(get_next_states( 8)(2) === 10.U(plru.nBits.W), s"get_next_state state=08 way=2: expected=10 actual=%d", get_next_states( 8)(2)) assert(get_next_states( 8)(3) === 8.U(plru.nBits.W), s"get_next_state state=08 way=3: expected=08 actual=%d", get_next_states( 8)(3)) assert(get_next_states( 8)(4) === 0.U(plru.nBits.W), s"get_next_state state=08 way=4: expected=00 actual=%d", get_next_states( 8)(4)) assert(get_next_states( 9)(0) === 13.U(plru.nBits.W), s"get_next_state state=09 way=0: expected=13 actual=%d", get_next_states( 9)(0)) assert(get_next_states( 9)(1) === 12.U(plru.nBits.W), s"get_next_state state=09 way=1: expected=12 actual=%d", get_next_states( 9)(1)) assert(get_next_states( 9)(2) === 11.U(plru.nBits.W), s"get_next_state state=09 way=2: expected=11 actual=%d", get_next_states( 9)(2)) assert(get_next_states( 9)(3) === 9.U(plru.nBits.W), s"get_next_state state=09 way=3: expected=09 actual=%d", get_next_states( 9)(3)) assert(get_next_states( 9)(4) === 1.U(plru.nBits.W), s"get_next_state state=09 way=4: expected=01 actual=%d", get_next_states( 9)(4)) assert(get_next_states(10)(0) === 15.U(plru.nBits.W), s"get_next_state state=10 way=0: expected=15 actual=%d", get_next_states(10)(0)) assert(get_next_states(10)(1) === 14.U(plru.nBits.W), s"get_next_state state=10 way=1: expected=14 actual=%d", get_next_states(10)(1)) assert(get_next_states(10)(2) === 10.U(plru.nBits.W), s"get_next_state state=10 way=2: expected=10 actual=%d", get_next_states(10)(2)) assert(get_next_states(10)(3) === 8.U(plru.nBits.W), s"get_next_state state=10 way=3: expected=08 actual=%d", get_next_states(10)(3)) assert(get_next_states(10)(4) === 2.U(plru.nBits.W), s"get_next_state state=10 way=4: expected=02 actual=%d", get_next_states(10)(4)) assert(get_next_states(11)(0) === 15.U(plru.nBits.W), s"get_next_state state=11 way=0: expected=15 actual=%d", get_next_states(11)(0)) assert(get_next_states(11)(1) === 14.U(plru.nBits.W), s"get_next_state state=11 way=1: expected=14 actual=%d", get_next_states(11)(1)) assert(get_next_states(11)(2) === 11.U(plru.nBits.W), 
s"get_next_state state=11 way=2: expected=11 actual=%d", get_next_states(11)(2)) assert(get_next_states(11)(3) === 9.U(plru.nBits.W), s"get_next_state state=11 way=3: expected=09 actual=%d", get_next_states(11)(3)) assert(get_next_states(11)(4) === 3.U(plru.nBits.W), s"get_next_state state=11 way=4: expected=03 actual=%d", get_next_states(11)(4)) assert(get_next_states(12)(0) === 13.U(plru.nBits.W), s"get_next_state state=12 way=0: expected=13 actual=%d", get_next_states(12)(0)) assert(get_next_states(12)(1) === 12.U(plru.nBits.W), s"get_next_state state=12 way=1: expected=12 actual=%d", get_next_states(12)(1)) assert(get_next_states(12)(2) === 10.U(plru.nBits.W), s"get_next_state state=12 way=2: expected=10 actual=%d", get_next_states(12)(2)) assert(get_next_states(12)(3) === 8.U(plru.nBits.W), s"get_next_state state=12 way=3: expected=08 actual=%d", get_next_states(12)(3)) assert(get_next_states(12)(4) === 4.U(plru.nBits.W), s"get_next_state state=12 way=4: expected=04 actual=%d", get_next_states(12)(4)) assert(get_next_states(13)(0) === 13.U(plru.nBits.W), s"get_next_state state=13 way=0: expected=13 actual=%d", get_next_states(13)(0)) assert(get_next_states(13)(1) === 12.U(plru.nBits.W), s"get_next_state state=13 way=1: expected=12 actual=%d", get_next_states(13)(1)) assert(get_next_states(13)(2) === 11.U(plru.nBits.W), s"get_next_state state=13 way=2: expected=11 actual=%d", get_next_states(13)(2)) assert(get_next_states(13)(3) === 9.U(plru.nBits.W), s"get_next_state state=13 way=3: expected=09 actual=%d", get_next_states(13)(3)) assert(get_next_states(13)(4) === 5.U(plru.nBits.W), s"get_next_state state=13 way=4: expected=05 actual=%d", get_next_states(13)(4)) assert(get_next_states(14)(0) === 15.U(plru.nBits.W), s"get_next_state state=14 way=0: expected=15 actual=%d", get_next_states(14)(0)) assert(get_next_states(14)(1) === 14.U(plru.nBits.W), s"get_next_state state=14 way=1: expected=14 actual=%d", get_next_states(14)(1)) assert(get_next_states(14)(2) === 10.U(plru.nBits.W), s"get_next_state state=14 way=2: expected=10 actual=%d", get_next_states(14)(2)) assert(get_next_states(14)(3) === 8.U(plru.nBits.W), s"get_next_state state=14 way=3: expected=08 actual=%d", get_next_states(14)(3)) assert(get_next_states(14)(4) === 6.U(plru.nBits.W), s"get_next_state state=14 way=4: expected=06 actual=%d", get_next_states(14)(4)) assert(get_next_states(15)(0) === 15.U(plru.nBits.W), s"get_next_state state=15 way=0: expected=15 actual=%d", get_next_states(15)(0)) assert(get_next_states(15)(1) === 14.U(plru.nBits.W), s"get_next_state state=15 way=5: expected=14 actual=%d", get_next_states(15)(1)) assert(get_next_states(15)(2) === 11.U(plru.nBits.W), s"get_next_state state=15 way=2: expected=11 actual=%d", get_next_states(15)(2)) assert(get_next_states(15)(3) === 9.U(plru.nBits.W), s"get_next_state state=15 way=3: expected=09 actual=%d", get_next_states(15)(3)) assert(get_next_states(15)(4) === 7.U(plru.nBits.W), s"get_next_state state=15 way=4: expected=07 actual=%d", get_next_states(15)(4)) } case 6 => { assert(get_replace_ways( 0) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=00: expected=0 actual=%d", get_replace_ways( 0)) assert(get_replace_ways( 1) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=01: expected=1 actual=%d", get_replace_ways( 1)) assert(get_replace_ways( 2) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=02: expected=0 actual=%d", get_replace_ways( 2)) assert(get_replace_ways( 3) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=03: expected=1 actual=%d", 
get_replace_ways( 3)) assert(get_replace_ways( 4) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=04: expected=2 actual=%d", get_replace_ways( 4)) assert(get_replace_ways( 5) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=05: expected=2 actual=%d", get_replace_ways( 5)) assert(get_replace_ways( 6) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=06: expected=3 actual=%d", get_replace_ways( 6)) assert(get_replace_ways( 7) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=07: expected=3 actual=%d", get_replace_ways( 7)) assert(get_replace_ways( 8) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=08: expected=0 actual=%d", get_replace_ways( 8)) assert(get_replace_ways( 9) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=09: expected=1 actual=%d", get_replace_ways( 9)) assert(get_replace_ways(10) === 0.U(log2Ceil(n_ways).W), s"get_replace_way state=10: expected=0 actual=%d", get_replace_ways(10)) assert(get_replace_ways(11) === 1.U(log2Ceil(n_ways).W), s"get_replace_way state=11: expected=1 actual=%d", get_replace_ways(11)) assert(get_replace_ways(12) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=12: expected=2 actual=%d", get_replace_ways(12)) assert(get_replace_ways(13) === 2.U(log2Ceil(n_ways).W), s"get_replace_way state=13: expected=2 actual=%d", get_replace_ways(13)) assert(get_replace_ways(14) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=14: expected=3 actual=%d", get_replace_ways(14)) assert(get_replace_ways(15) === 3.U(log2Ceil(n_ways).W), s"get_replace_way state=15: expected=3 actual=%d", get_replace_ways(15)) assert(get_replace_ways(16) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=16: expected=4 actual=%d", get_replace_ways(16)) assert(get_replace_ways(17) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=17: expected=4 actual=%d", get_replace_ways(17)) assert(get_replace_ways(18) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=18: expected=4 actual=%d", get_replace_ways(18)) assert(get_replace_ways(19) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=19: expected=4 actual=%d", get_replace_ways(19)) assert(get_replace_ways(20) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=20: expected=4 actual=%d", get_replace_ways(20)) assert(get_replace_ways(21) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=21: expected=4 actual=%d", get_replace_ways(21)) assert(get_replace_ways(22) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=22: expected=4 actual=%d", get_replace_ways(22)) assert(get_replace_ways(23) === 4.U(log2Ceil(n_ways).W), s"get_replace_way state=23: expected=4 actual=%d", get_replace_ways(23)) assert(get_replace_ways(24) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=24: expected=5 actual=%d", get_replace_ways(24)) assert(get_replace_ways(25) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=25: expected=5 actual=%d", get_replace_ways(25)) assert(get_replace_ways(26) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=26: expected=5 actual=%d", get_replace_ways(26)) assert(get_replace_ways(27) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=27: expected=5 actual=%d", get_replace_ways(27)) assert(get_replace_ways(28) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=28: expected=5 actual=%d", get_replace_ways(28)) assert(get_replace_ways(29) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=29: expected=5 actual=%d", get_replace_ways(29)) assert(get_replace_ways(30) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=30: expected=5 actual=%d", get_replace_ways(30)) 
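// 6-way PLRU state layout (following the tree scheme documented above):
// bit[4]: ways 5+4 older than ways 3-0, bit[3]: way 5 older than way 4,
// bits[2:0]: 4-way PLRU over ways 3-0; hence states 16-23 replace way 4 and
// states 24-31 replace way 5, as the surrounding checks verify.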
assert(get_replace_ways(31) === 5.U(log2Ceil(n_ways).W), s"get_replace_way state=31: expected=5 actual=%d", get_replace_ways(31)) } case _ => throw new IllegalArgumentException(s"no test pattern found for n_ways=$n_ways") } } File Consts.scala: // See LICENSE.Berkeley for license details. package freechips.rocketchip.rocket.constants import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ trait ScalarOpConstants { val SZ_BR = 3 def BR_X = BitPat("b???") def BR_EQ = 0.U(3.W) def BR_NE = 1.U(3.W) def BR_J = 2.U(3.W) def BR_N = 3.U(3.W) def BR_LT = 4.U(3.W) def BR_GE = 5.U(3.W) def BR_LTU = 6.U(3.W) def BR_GEU = 7.U(3.W) def A1_X = BitPat("b??") def A1_ZERO = 0.U(2.W) def A1_RS1 = 1.U(2.W) def A1_PC = 2.U(2.W) def A1_RS1SHL = 3.U(2.W) def IMM_X = BitPat("b???") def IMM_S = 0.U(3.W) def IMM_SB = 1.U(3.W) def IMM_U = 2.U(3.W) def IMM_UJ = 3.U(3.W) def IMM_I = 4.U(3.W) def IMM_Z = 5.U(3.W) def A2_X = BitPat("b???") def A2_ZERO = 0.U(3.W) def A2_SIZE = 1.U(3.W) def A2_RS2 = 2.U(3.W) def A2_IMM = 3.U(3.W) def A2_RS2OH = 4.U(3.W) def A2_IMMOH = 5.U(3.W) def X = BitPat("b?") def N = BitPat("b0") def Y = BitPat("b1") val SZ_DW = 1 def DW_X = X def DW_32 = false.B def DW_64 = true.B def DW_XPR = DW_64 } trait MemoryOpConstants { val NUM_XA_OPS = 9 val M_SZ = 5 def M_X = BitPat("b?????"); def M_XRD = "b00000".U; // int load def M_XWR = "b00001".U; // int store def M_PFR = "b00010".U; // prefetch with intent to read def M_PFW = "b00011".U; // prefetch with intent to write def M_XA_SWAP = "b00100".U def M_FLUSH_ALL = "b00101".U // flush all lines def M_XLR = "b00110".U def M_XSC = "b00111".U def M_XA_ADD = "b01000".U def M_XA_XOR = "b01001".U def M_XA_OR = "b01010".U def M_XA_AND = "b01011".U def M_XA_MIN = "b01100".U def M_XA_MAX = "b01101".U def M_XA_MINU = "b01110".U def M_XA_MAXU = "b01111".U def M_FLUSH = "b10000".U // write back dirty data and cede R/W permissions def M_PWR = "b10001".U // partial (masked) store def M_PRODUCE = "b10010".U // write back dirty data and cede W permissions def M_CLEAN = "b10011".U // write back dirty data and retain R/W permissions def M_SFENCE = "b10100".U // SFENCE.VMA def M_HFENCEV = "b10101".U // HFENCE.VVMA def M_HFENCEG = "b10110".U // HFENCE.GVMA def M_WOK = "b10111".U // check write permissions but don't perform a write def M_HLVX = "b10000".U // HLVX instruction def isAMOLogical(cmd: UInt) = cmd.isOneOf(M_XA_SWAP, M_XA_XOR, M_XA_OR, M_XA_AND) def isAMOArithmetic(cmd: UInt) = cmd.isOneOf(M_XA_ADD, M_XA_MIN, M_XA_MAX, M_XA_MINU, M_XA_MAXU) def isAMO(cmd: UInt) = isAMOLogical(cmd) || isAMOArithmetic(cmd) def isPrefetch(cmd: UInt) = cmd === M_PFR || cmd === M_PFW def isRead(cmd: UInt) = cmd.isOneOf(M_XRD, M_HLVX, M_XLR, M_XSC) || isAMO(cmd) def isWrite(cmd: UInt) = cmd === M_XWR || cmd === M_PWR || cmd === M_XSC || isAMO(cmd) def isWriteIntent(cmd: UInt) = isWrite(cmd) || cmd === M_PFW || cmd === M_XLR } File TLB.scala: // See LICENSE.SiFive for license details. // See LICENSE.Berkeley for license details. 
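// Illustrative instantiation sketch (not part of the original sources; the
// request-side signal names are hypothetical and a TLEdgeOut must be in
// implicit scope):
//   val dtlb = Module(new TLB(instruction = false, lgMaxSize = 3,
//                             TLBConfig(nSets = 1, nWays = 32)))
//   dtlb.io.req.valid      := mem_req_valid
//   dtlb.io.req.bits.vaddr := mem_req_vaddr
//   ptw.io.requestor(0)    <> dtlb.io.ptw
// The actual interface is the io Bundle declared in class TLB below.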
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import freechips.rocketchip.devices.debug.DebugModuleKey import freechips.rocketchip.diplomacy.RegionType import freechips.rocketchip.subsystem.CacheBlockBytes import freechips.rocketchip.tile.{CoreModule, CoreBundle} import freechips.rocketchip.tilelink._ import freechips.rocketchip.util.{OptimizationBarrier, SetAssocLRU, PseudoLRU, PopCountAtLeast, property} import freechips.rocketchip.util.BooleanToAugmentedBoolean import freechips.rocketchip.util.IntToAugmentedInt import freechips.rocketchip.util.UIntToAugmentedUInt import freechips.rocketchip.util.UIntIsOneOf import freechips.rocketchip.util.SeqToAugmentedSeq import freechips.rocketchip.util.SeqBoolBitwiseOps case object ASIdBits extends Field[Int](0) case object VMIdBits extends Field[Int](0) /** =SFENCE= * rs1 rs2 * {{{ * 0 0 -> flush All * 0 1 -> flush by ASID * 1 1 -> flush by ADDR * 1 0 -> flush by ADDR and ASID * }}} * {{{ * If rs1=x0 and rs2=x0, the fence orders all reads and writes made to any level of the page tables, for all address spaces. * If rs1=x0 and rs2!=x0, the fence orders all reads and writes made to any level of the page tables, but only for the address space identified by integer register rs2. Accesses to global mappings (see Section 4.3.1) are not ordered. * If rs1!=x0 and rs2=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for all address spaces. * If rs1!=x0 and rs2!=x0, the fence orders only reads and writes made to the leaf page table entry corresponding to the virtual address in rs1, for the address space identified by integer register rs2. Accesses to global mappings are not ordered. * }}} */ class SFenceReq(implicit p: Parameters) extends CoreBundle()(p) { val rs1 = Bool() val rs2 = Bool() val addr = UInt(vaddrBits.W) val asid = UInt((asIdBits max 1).W) // TODO zero-width val hv = Bool() val hg = Bool() } class TLBReq(lgMaxSize: Int)(implicit p: Parameters) extends CoreBundle()(p) { /** request address from CPU. */ val vaddr = UInt(vaddrBitsExtended.W) /** don't lookup TLB, bypass vaddr as paddr */ val passthrough = Bool() /** granularity */ val size = UInt(log2Ceil(lgMaxSize + 1).W) /** memory command. */ val cmd = Bits(M_SZ.W) val prv = UInt(PRV.SZ.W) /** virtualization mode */ val v = Bool() } class TLBExceptions extends Bundle { val ld = Bool() val st = Bool() val inst = Bool() } class TLBResp(lgMaxSize: Int = 3)(implicit p: Parameters) extends CoreBundle()(p) { // lookup responses val miss = Bool() /** physical address */ val paddr = UInt(paddrBits.W) val gpa = UInt(vaddrBitsExtended.W) val gpa_is_pte = Bool() /** page fault exception */ val pf = new TLBExceptions /** guest page fault exception */ val gf = new TLBExceptions /** access exception */ val ae = new TLBExceptions /** misaligned access exception */ val ma = new TLBExceptions /** if this address is cacheable */ val cacheable = Bool() /** if caches must allocate this address */ val must_alloc = Bool() /** if this address is prefetchable for caches*/ val prefetchable = Bool() /** size/cmd of request that generated this response*/ val size = UInt(log2Ceil(lgMaxSize + 1).W) val cmd = UInt(M_SZ.W) } class TLBEntryData(implicit p: Parameters) extends CoreBundle()(p) { val ppn = UInt(ppnBits.W) /** pte.u user */ val u = Bool() /** pte.g global */ val g = Bool() /** access exception. * D$ -> PTW -> TLB AE * Alignment failed. 
*/ val ae_ptw = Bool() val ae_final = Bool() val ae_stage2 = Bool() /** page fault */ val pf = Bool() /** guest page fault */ val gf = Bool() /** supervisor write */ val sw = Bool() /** supervisor execute */ val sx = Bool() /** supervisor read */ val sr = Bool() /** hypervisor write */ val hw = Bool() /** hypervisor execute */ val hx = Bool() /** hypervisor read */ val hr = Bool() /** prot_w */ val pw = Bool() /** prot_x */ val px = Bool() /** prot_r */ val pr = Bool() /** PutPartial */ val ppp = Bool() /** AMO logical */ val pal = Bool() /** AMO arithmetic */ val paa = Bool() /** get/put effects */ val eff = Bool() /** cacheable */ val c = Bool() /** fragmented_superpage support */ val fragmented_superpage = Bool() } /** basic cell for TLB data */ class TLBEntry(val nSectors: Int, val superpage: Boolean, val superpageOnly: Boolean)(implicit p: Parameters) extends CoreBundle()(p) { require(nSectors == 1 || !superpage) require(!superpageOnly || superpage) val level = UInt(log2Ceil(pgLevels).W) /** use vpn as tag */ val tag_vpn = UInt(vpnBits.W) /** tag in virtualization mode */ val tag_v = Bool() /** entry data */ val data = Vec(nSectors, UInt(new TLBEntryData().getWidth.W)) /** valid bit */ val valid = Vec(nSectors, Bool()) /** returns all entry data in this entry */ def entry_data = data.map(_.asTypeOf(new TLBEntryData)) /** returns the index of sector */ private def sectorIdx(vpn: UInt) = vpn.extract(nSectors.log2-1, 0) /** returns the entry data matched with this vpn */ def getData(vpn: UInt) = OptimizationBarrier(data(sectorIdx(vpn)).asTypeOf(new TLBEntryData)) /** returns whether a sector hits */ def sectorHit(vpn: UInt, virtual: Bool) = valid.orR && sectorTagMatch(vpn, virtual) /** returns whether tag matches vpn */ def sectorTagMatch(vpn: UInt, virtual: Bool) = (((tag_vpn ^ vpn) >> nSectors.log2) === 0.U) && (tag_v === virtual) /** returns hit signal */ def hit(vpn: UInt, virtual: Bool): Bool = { if (superpage && usingVM) { var tagMatch = valid.head && (tag_v === virtual) for (j <- 0 until pgLevels) { val base = (pgLevels - 1 - j) * pgLevelBits val n = pgLevelBits + (if (j == 0) hypervisorExtraAddrBits else 0) val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B tagMatch = tagMatch && (ignore || (tag_vpn ^ vpn)(base + n - 1, base) === 0.U) } tagMatch } else { val idx = sectorIdx(vpn) valid(idx) && sectorTagMatch(vpn, virtual) } } /** returns the ppn of the input TLBEntryData */ def ppn(vpn: UInt, data: TLBEntryData) = { val supervisorVPNBits = pgLevels * pgLevelBits if (superpage && usingVM) { var res = data.ppn >> pgLevelBits*(pgLevels - 1) for (j <- 1 until pgLevels) { val ignore = level < j.U || (superpageOnly && j == pgLevels - 1).B res = Cat(res, (Mux(ignore, vpn, 0.U) | data.ppn)(supervisorVPNBits - j*pgLevelBits - 1, supervisorVPNBits - (j + 1)*pgLevelBits)) } res } else { data.ppn } } /** performs the refill * * find the target entry with the vpn tag * and replace the target entry with the input entry data */ def insert(vpn: UInt, virtual: Bool, level: UInt, entry: TLBEntryData): Unit = { this.tag_vpn := vpn this.tag_v := virtual this.level := level.extract(log2Ceil(pgLevels - superpageOnly.toInt)-1, 0) val idx = sectorIdx(vpn) valid(idx) := true.B data(idx) := entry.asUInt } def invalidate(): Unit = { valid.foreach(_ := false.B) } def invalidate(virtual: Bool): Unit = { for ((v, e) <- valid zip entry_data) when (tag_v === virtual) { v := false.B } } def invalidateVPN(vpn: UInt, virtual: Bool): Unit = { if (superpage) { when (hit(vpn, virtual)) { invalidate() } } else { 
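// sectored (non-superpage) entry: when the sector tag matches, clear only the
// sector selected by vpn, leaving the entry's other sectors valid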
when (sectorTagMatch(vpn, virtual)) { for (((v, e), i) <- (valid zip entry_data).zipWithIndex) when (tag_v === virtual && i.U === sectorIdx(vpn)) { v := false.B } } } // For fragmented superpage mappings, we assume the worst (largest) // case, and zap entries whose most-significant VPNs match when (((tag_vpn ^ vpn) >> (pgLevelBits * (pgLevels - 1))) === 0.U) { for ((v, e) <- valid zip entry_data) when (tag_v === virtual && e.fragmented_superpage) { v := false.B } } } def invalidateNonGlobal(virtual: Bool): Unit = { for ((v, e) <- valid zip entry_data) when (tag_v === virtual && !e.g) { v := false.B } } } /** TLB config * * @param nSets the number of sets of PTE, follow [[ICacheParams.nSets]] * @param nWays the total number of wayss of PTE, follow [[ICacheParams.nWays]] * @param nSectors the number of ways in a single PTE TLBEntry * @param nSuperpageEntries the number of SuperpageEntries */ case class TLBConfig( nSets: Int, nWays: Int, nSectors: Int = 4, nSuperpageEntries: Int = 4) /** =Overview= * [[TLB]] is a TLB template which contains PMA logic and PMP checker. * * TLB caches PTE and accelerates the address translation process. * When tlb miss happens, ask PTW(L2TLB) for Page Table Walk. * Perform PMP and PMA check during the translation and throw exception if there were any. * * ==Cache Structure== * - Sectored Entry (PTE) * - set-associative or direct-mapped * - nsets = [[TLBConfig.nSets]] * - nways = [[TLBConfig.nWays]] / [[TLBConfig.nSectors]] * - PTEEntry( sectors = [[TLBConfig.nSectors]] ) * - LRU(if set-associative) * * - Superpage Entry(superpage PTE) * - fully associative * - nsets = [[TLBConfig.nSuperpageEntries]] * - PTEEntry(sectors = 1) * - PseudoLRU * * - Special Entry(PTE across PMP) * - nsets = 1 * - PTEEntry(sectors = 1) * * ==Address structure== * {{{ * |vaddr | * |ppn/vpn | pgIndex | * | | | * | |nSets |nSector | |}}} * * ==State Machine== * {{{ * s_ready: ready to accept request from CPU. * s_request: when L1TLB(this) miss, send request to PTW(L2TLB), . * s_wait: wait for PTW to refill L1TLB. * s_wait_invalidate: L1TLB is waiting for respond from PTW, but L1TLB will invalidate respond from PTW.}}} * * ==PMP== * pmp check * - special_entry: always check * - other entry: check on refill * * ==Note== * PMA consume diplomacy parameter generate physical memory address checking logic * * Boom use Rocket ITLB, and its own DTLB. * * Accelerators:{{{ * sha3: DTLB * gemmini: DTLB * hwacha: DTLB*2+ITLB}}} * @param instruction true for ITLB, false for DTLB * @param lgMaxSize @todo seems granularity * @param cfg [[TLBConfig]] * @param edge collect SoC metadata. 
*/ class TLB(instruction: Boolean, lgMaxSize: Int, cfg: TLBConfig)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) { override def desiredName = if (instruction) "ITLB" else "DTLB" val io = IO(new Bundle { /** request from Core */ val req = Flipped(Decoupled(new TLBReq(lgMaxSize))) /** response to Core */ val resp = Output(new TLBResp(lgMaxSize)) /** SFence Input */ val sfence = Flipped(Valid(new SFenceReq)) /** IO to PTW */ val ptw = new TLBPTWIO /** suppress a TLB refill, one cycle after a miss */ val kill = Input(Bool()) }) io.ptw.customCSRs := DontCare val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits) val vpn = io.req.bits.vaddr(vaddrBits-1, pgIdxBits) /** index for sectored_Entry */ val memIdx = vpn.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2) /** TLB Entry */ val sectored_entries = Reg(Vec(cfg.nSets, Vec(cfg.nWays / cfg.nSectors, new TLBEntry(cfg.nSectors, false, false)))) /** Superpage Entry */ val superpage_entries = Reg(Vec(cfg.nSuperpageEntries, new TLBEntry(1, true, true))) /** Special Entry * * If PMP granularity is less than page size, thus need additional "special" entry manage PMP. */ val special_entry = (!pageGranularityPMPs).option(Reg(new TLBEntry(1, true, false))) def ordinary_entries = sectored_entries(memIdx) ++ superpage_entries def all_entries = ordinary_entries ++ special_entry def all_real_entries = sectored_entries.flatten ++ superpage_entries ++ special_entry val s_ready :: s_request :: s_wait :: s_wait_invalidate :: Nil = Enum(4) val state = RegInit(s_ready) // use vpn as refill_tag val r_refill_tag = Reg(UInt(vpnBits.W)) val r_superpage_repl_addr = Reg(UInt(log2Ceil(superpage_entries.size).W)) val r_sectored_repl_addr = Reg(UInt(log2Ceil(sectored_entries.head.size).W)) val r_sectored_hit = Reg(Valid(UInt(log2Ceil(sectored_entries.head.size).W))) val r_superpage_hit = Reg(Valid(UInt(log2Ceil(superpage_entries.size).W))) val r_vstage1_en = Reg(Bool()) val r_stage2_en = Reg(Bool()) val r_need_gpa = Reg(Bool()) val r_gpa_valid = Reg(Bool()) val r_gpa = Reg(UInt(vaddrBits.W)) val r_gpa_vpn = Reg(UInt(vpnBits.W)) val r_gpa_is_pte = Reg(Bool()) /** privilege mode */ val priv = io.req.bits.prv val priv_v = usingHypervisor.B && io.req.bits.v val priv_s = priv(0) // user mode and supervisor mode val priv_uses_vm = priv <= PRV.S.U val satp = Mux(priv_v, io.ptw.vsatp, io.ptw.ptbr) val stage1_en = usingVM.B && satp.mode(satp.mode.getWidth-1) /** VS-stage translation enable */ val vstage1_en = usingHypervisor.B && priv_v && io.ptw.vsatp.mode(io.ptw.vsatp.mode.getWidth-1) /** G-stage translation enable */ val stage2_en = usingHypervisor.B && priv_v && io.ptw.hgatp.mode(io.ptw.hgatp.mode.getWidth-1) /** Enable Virtual Memory when: * 1. statically configured * 1. satp highest bits enabled * i. RV32: * - 0 -> Bare * - 1 -> SV32 * i. RV64: * - 0000 -> Bare * - 1000 -> SV39 * - 1001 -> SV48 * - 1010 -> SV57 * - 1011 -> SV64 * 1. In virtualization mode, vsatp highest bits enabled * 1. priv mode in U and S. * 1. in H & M mode, disable VM. * 1. no passthrough(micro-arch defined.) 
* * @see RV-priv spec 4.1.11 Supervisor Address Translation and Protection (satp) Register * @see RV-priv spec 8.2.18 Virtual Supervisor Address Translation and Protection Register (vsatp) */ val vm_enabled = (stage1_en || stage2_en) && priv_uses_vm && !io.req.bits.passthrough // flush guest entries on vsatp.MODE Bare <-> SvXX transitions val v_entries_use_stage1 = RegInit(false.B) val vsatp_mode_mismatch = priv_v && (vstage1_en =/= v_entries_use_stage1) && !io.req.bits.passthrough // share a single physical memory attribute checker (unshare if critical path) val refill_ppn = io.ptw.resp.bits.pte.ppn(ppnBits-1, 0) /** refill signal */ val do_refill = usingVM.B && io.ptw.resp.valid /** sfence invalidate refill */ val invalidate_refill = state.isOneOf(s_request /* don't care */, s_wait_invalidate) || io.sfence.valid // PMP val mpu_ppn = Mux(do_refill, refill_ppn, Mux(vm_enabled && special_entry.nonEmpty.B, special_entry.map(e => e.ppn(vpn, e.getData(vpn))).getOrElse(0.U), io.req.bits.vaddr >> pgIdxBits)) val mpu_physaddr = Cat(mpu_ppn, io.req.bits.vaddr(pgIdxBits-1, 0)) val mpu_priv = Mux[UInt](usingVM.B && (do_refill || io.req.bits.passthrough /* PTW */), PRV.S.U, Cat(io.ptw.status.debug, priv)) val pmp = Module(new PMPChecker(lgMaxSize)) pmp.io.addr := mpu_physaddr pmp.io.size := io.req.bits.size pmp.io.pmp := (io.ptw.pmp: Seq[PMP]) pmp.io.prv := mpu_priv val pma = Module(new PMAChecker(edge.manager)(p)) pma.io.paddr := mpu_physaddr // todo: using DataScratchpad doesn't support cacheable. val cacheable = pma.io.resp.cacheable && (instruction || !usingDataScratchpad).B val homogeneous = TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), BigInt(1) << pgIdxBits, 1 << lgMaxSize)(mpu_physaddr).homogeneous // In M mode, if access DM address(debug module program buffer) val deny_access_to_debug = mpu_priv <= PRV.M.U && p(DebugModuleKey).map(dmp => dmp.address.contains(mpu_physaddr)).getOrElse(false.B) val prot_r = pma.io.resp.r && !deny_access_to_debug && pmp.io.r val prot_w = pma.io.resp.w && !deny_access_to_debug && pmp.io.w val prot_pp = pma.io.resp.pp val prot_al = pma.io.resp.al val prot_aa = pma.io.resp.aa val prot_x = pma.io.resp.x && !deny_access_to_debug && pmp.io.x val prot_eff = pma.io.resp.eff // hit check val sector_hits = sectored_entries(memIdx).map(_.sectorHit(vpn, priv_v)) val superpage_hits = superpage_entries.map(_.hit(vpn, priv_v)) val hitsVec = all_entries.map(vm_enabled && _.hit(vpn, priv_v)) val real_hits = hitsVec.asUInt val hits = Cat(!vm_enabled, real_hits) // use ptw response to refill // permission bit arrays when (do_refill) { val pte = io.ptw.resp.bits.pte val refill_v = r_vstage1_en || r_stage2_en val newEntry = Wire(new TLBEntryData) newEntry.ppn := pte.ppn newEntry.c := cacheable newEntry.u := pte.u newEntry.g := pte.g && pte.v newEntry.ae_ptw := io.ptw.resp.bits.ae_ptw newEntry.ae_final := io.ptw.resp.bits.ae_final newEntry.ae_stage2 := io.ptw.resp.bits.ae_final && io.ptw.resp.bits.gpa_is_pte && r_stage2_en newEntry.pf := io.ptw.resp.bits.pf newEntry.gf := io.ptw.resp.bits.gf newEntry.hr := io.ptw.resp.bits.hr newEntry.hw := io.ptw.resp.bits.hw newEntry.hx := io.ptw.resp.bits.hx newEntry.sr := pte.sr() newEntry.sw := pte.sw() newEntry.sx := pte.sx() newEntry.pr := prot_r newEntry.pw := prot_w newEntry.px := prot_x newEntry.ppp := prot_pp newEntry.pal := prot_al newEntry.paa := prot_aa newEntry.eff := prot_eff newEntry.fragmented_superpage := io.ptw.resp.bits.fragmented_superpage // refill special_entry when (special_entry.nonEmpty.B && 
!io.ptw.resp.bits.homogeneous) { special_entry.foreach(_.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry)) }.elsewhen (io.ptw.resp.bits.level < (pgLevels-1).U) { val waddr = Mux(r_superpage_hit.valid && usingHypervisor.B, r_superpage_hit.bits, r_superpage_repl_addr) for ((e, i) <- superpage_entries.zipWithIndex) when (r_superpage_repl_addr === i.U) { e.insert(r_refill_tag, refill_v, io.ptw.resp.bits.level, newEntry) when (invalidate_refill) { e.invalidate() } } // refill sectored_hit }.otherwise { val r_memIdx = r_refill_tag.extract(cfg.nSectors.log2 + cfg.nSets.log2 - 1, cfg.nSectors.log2) val waddr = Mux(r_sectored_hit.valid, r_sectored_hit.bits, r_sectored_repl_addr) for ((e, i) <- sectored_entries(r_memIdx).zipWithIndex) when (waddr === i.U) { when (!r_sectored_hit.valid) { e.invalidate() } e.insert(r_refill_tag, refill_v, 0.U, newEntry) when (invalidate_refill) { e.invalidate() } } } r_gpa_valid := io.ptw.resp.bits.gpa.valid r_gpa := io.ptw.resp.bits.gpa.bits r_gpa_is_pte := io.ptw.resp.bits.gpa_is_pte } // get all entries data. val entries = all_entries.map(_.getData(vpn)) val normal_entries = entries.take(ordinary_entries.size) // parallel query PPN from [[all_entries]], if VM not enabled return VPN instead val ppn = Mux1H(hitsVec :+ !vm_enabled, (all_entries zip entries).map{ case (entry, data) => entry.ppn(vpn, data) } :+ vpn(ppnBits-1, 0)) val nPhysicalEntries = 1 + special_entry.size // generally PTW misaligned load exception. val ptw_ae_array = Cat(false.B, entries.map(_.ae_ptw).asUInt) val final_ae_array = Cat(false.B, entries.map(_.ae_final).asUInt) val ptw_pf_array = Cat(false.B, entries.map(_.pf).asUInt) val ptw_gf_array = Cat(false.B, entries.map(_.gf).asUInt) val sum = Mux(priv_v, io.ptw.gstatus.sum, io.ptw.status.sum) // if in hypervisor/machine mode, cannot read/write user entries. // if in superviosr/user mode, "If the SUM bit in the sstatus register is set, supervisor mode software may also access pages with U=1.(from spec)" val priv_rw_ok = Mux(!priv_s || sum, entries.map(_.u).asUInt, 0.U) | Mux(priv_s, ~entries.map(_.u).asUInt, 0.U) // if in hypervisor/machine mode, other than user pages, all pages are executable. // if in superviosr/user mode, only user page can execute. val priv_x_ok = Mux(priv_s, ~entries.map(_.u).asUInt, entries.map(_.u).asUInt) val stage1_bypass = Fill(entries.size, usingHypervisor.B) & (Fill(entries.size, !stage1_en) | entries.map(_.ae_stage2).asUInt) val mxr = io.ptw.status.mxr | Mux(priv_v, io.ptw.gstatus.mxr, false.B) // "The vsstatus field MXR, which makes execute-only pages readable, only overrides VS-stage page protection.(from spec)" val r_array = Cat(true.B, (priv_rw_ok & (entries.map(_.sr).asUInt | Mux(mxr, entries.map(_.sx).asUInt, 0.U))) | stage1_bypass) val w_array = Cat(true.B, (priv_rw_ok & entries.map(_.sw).asUInt) | stage1_bypass) val x_array = Cat(true.B, (priv_x_ok & entries.map(_.sx).asUInt) | stage1_bypass) val stage2_bypass = Fill(entries.size, !stage2_en) val hr_array = Cat(true.B, entries.map(_.hr).asUInt | Mux(io.ptw.status.mxr, entries.map(_.hx).asUInt, 0.U) | stage2_bypass) val hw_array = Cat(true.B, entries.map(_.hw).asUInt | stage2_bypass) val hx_array = Cat(true.B, entries.map(_.hx).asUInt | stage2_bypass) // These array is for each TLB entries. 
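  // Layout of the physically-checked permission arrays built just below (pr/pw/px, eff, c,
  // ppp, paa, pal), as a reading aid derived from their Cat/Fill construction:
  //   [ MSBs: nPhysicalEntries bits ][ LSBs: ordinary_entries.size bits ]
  // The high bits cover the !vm_enabled/passthrough slot and the special entry and are driven
  // directly by the freshly computed PMA/PMP results (prot_*); the low bits come from the
  // permissions cached in the sectored and superpage entries.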
// user mode can read: PMA OK, TLB OK, AE OK val pr_array = Cat(Fill(nPhysicalEntries, prot_r), normal_entries.map(_.pr).asUInt) & ~(ptw_ae_array | final_ae_array) // user mode can write: PMA OK, TLB OK, AE OK val pw_array = Cat(Fill(nPhysicalEntries, prot_w), normal_entries.map(_.pw).asUInt) & ~(ptw_ae_array | final_ae_array) // user mode can write: PMA OK, TLB OK, AE OK val px_array = Cat(Fill(nPhysicalEntries, prot_x), normal_entries.map(_.px).asUInt) & ~(ptw_ae_array | final_ae_array) // put effect val eff_array = Cat(Fill(nPhysicalEntries, prot_eff), normal_entries.map(_.eff).asUInt) // cacheable val c_array = Cat(Fill(nPhysicalEntries, cacheable), normal_entries.map(_.c).asUInt) // put partial val ppp_array = Cat(Fill(nPhysicalEntries, prot_pp), normal_entries.map(_.ppp).asUInt) // atomic arithmetic val paa_array = Cat(Fill(nPhysicalEntries, prot_aa), normal_entries.map(_.paa).asUInt) // atomic logic val pal_array = Cat(Fill(nPhysicalEntries, prot_al), normal_entries.map(_.pal).asUInt) val ppp_array_if_cached = ppp_array | c_array val paa_array_if_cached = paa_array | (if(usingAtomicsInCache) c_array else 0.U) val pal_array_if_cached = pal_array | (if(usingAtomicsInCache) c_array else 0.U) val prefetchable_array = Cat((cacheable && homogeneous) << (nPhysicalEntries-1), normal_entries.map(_.c).asUInt) // vaddr misaligned: vaddr[1:0]=b00 val misaligned = (io.req.bits.vaddr & (UIntToOH(io.req.bits.size) - 1.U)).orR def badVA(guestPA: Boolean): Bool = { val additionalPgLevels = (if (guestPA) io.ptw.hgatp else satp).additionalPgLevels val extraBits = if (guestPA) hypervisorExtraAddrBits else 0 val signed = !guestPA val nPgLevelChoices = pgLevels - minPgLevels + 1 val minVAddrBits = pgIdxBits + minPgLevels * pgLevelBits + extraBits (for (i <- 0 until nPgLevelChoices) yield { val mask = ((BigInt(1) << vaddrBitsExtended) - (BigInt(1) << (minVAddrBits + i * pgLevelBits - signed.toInt))).U val maskedVAddr = io.req.bits.vaddr & mask additionalPgLevels === i.U && !(maskedVAddr === 0.U || signed.B && maskedVAddr === mask) }).orR } val bad_gpa = if (!usingHypervisor) false.B else vm_enabled && !stage1_en && badVA(true) val bad_va = if (!usingVM || (minPgLevels == pgLevels && vaddrBits == vaddrBitsExtended)) false.B else vm_enabled && stage1_en && badVA(false) val cmd_lrsc = usingAtomics.B && io.req.bits.cmd.isOneOf(M_XLR, M_XSC) val cmd_amo_logical = usingAtomics.B && isAMOLogical(io.req.bits.cmd) val cmd_amo_arithmetic = usingAtomics.B && isAMOArithmetic(io.req.bits.cmd) val cmd_put_partial = io.req.bits.cmd === M_PWR val cmd_read = isRead(io.req.bits.cmd) val cmd_readx = usingHypervisor.B && io.req.bits.cmd === M_HLVX val cmd_write = isWrite(io.req.bits.cmd) val cmd_write_perms = cmd_write || io.req.bits.cmd.isOneOf(M_FLUSH_ALL, M_WOK) // not a write, but needs write permissions val lrscAllowed = Mux((usingDataScratchpad || usingAtomicsOnlyForIO).B, 0.U, c_array) val ae_array = Mux(misaligned, eff_array, 0.U) | Mux(cmd_lrsc, ~lrscAllowed, 0.U) // access exception needs SoC information from PMA val ae_ld_array = Mux(cmd_read, ae_array | ~pr_array, 0.U) val ae_st_array = Mux(cmd_write_perms, ae_array | ~pw_array, 0.U) | Mux(cmd_put_partial, ~ppp_array_if_cached, 0.U) | Mux(cmd_amo_logical, ~pal_array_if_cached, 0.U) | Mux(cmd_amo_arithmetic, ~paa_array_if_cached, 0.U) val must_alloc_array = Mux(cmd_put_partial, ~ppp_array, 0.U) | Mux(cmd_amo_logical, ~pal_array, 0.U) | Mux(cmd_amo_arithmetic, ~paa_array, 0.U) | Mux(cmd_lrsc, ~0.U(pal_array.getWidth.W), 0.U) val pf_ld_array = Mux(cmd_read, 
((~Mux(cmd_readx, x_array, r_array) & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U) val pf_st_array = Mux(cmd_write_perms, ((~w_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array, 0.U) val pf_inst_array = ((~x_array & ~ptw_ae_array) | ptw_pf_array) & ~ptw_gf_array val gf_ld_array = Mux(priv_v && cmd_read, (~Mux(cmd_readx, hx_array, hr_array) | ptw_gf_array) & ~ptw_ae_array, 0.U) val gf_st_array = Mux(priv_v && cmd_write_perms, (~hw_array | ptw_gf_array) & ~ptw_ae_array, 0.U) val gf_inst_array = Mux(priv_v, (~hx_array | ptw_gf_array) & ~ptw_ae_array, 0.U) val gpa_hits = { val need_gpa_mask = if (instruction) gf_inst_array else gf_ld_array | gf_st_array val hit_mask = Fill(ordinary_entries.size, r_gpa_valid && r_gpa_vpn === vpn) | Fill(all_entries.size, !vstage1_en) hit_mask | ~need_gpa_mask(all_entries.size-1, 0) } val tlb_hit_if_not_gpa_miss = real_hits.orR val tlb_hit = (real_hits & gpa_hits).orR // leads to s_request val tlb_miss = vm_enabled && !vsatp_mode_mismatch && !bad_va && !tlb_hit val sectored_plru = new SetAssocLRU(cfg.nSets, sectored_entries.head.size, "plru") val superpage_plru = new PseudoLRU(superpage_entries.size) when (io.req.valid && vm_enabled) { // replace when (sector_hits.orR) { sectored_plru.access(memIdx, OHToUInt(sector_hits)) } when (superpage_hits.orR) { superpage_plru.access(OHToUInt(superpage_hits)) } } // Superpages create the possibility that two entries in the TLB may match. // This corresponds to a software bug, but we can't return complete garbage; // we must return either the old translation or the new translation. This // isn't compatible with the Mux1H approach. So, flush the TLB and report // a miss on duplicate entries. val multipleHits = PopCountAtLeast(real_hits, 2) // only pull up req.ready when this is s_ready state. 
io.req.ready := state === s_ready // page fault io.resp.pf.ld := (bad_va && cmd_read) || (pf_ld_array & hits).orR io.resp.pf.st := (bad_va && cmd_write_perms) || (pf_st_array & hits).orR io.resp.pf.inst := bad_va || (pf_inst_array & hits).orR // guest page fault io.resp.gf.ld := (bad_gpa && cmd_read) || (gf_ld_array & hits).orR io.resp.gf.st := (bad_gpa && cmd_write_perms) || (gf_st_array & hits).orR io.resp.gf.inst := bad_gpa || (gf_inst_array & hits).orR // access exception io.resp.ae.ld := (ae_ld_array & hits).orR io.resp.ae.st := (ae_st_array & hits).orR io.resp.ae.inst := (~px_array & hits).orR // misaligned io.resp.ma.ld := misaligned && cmd_read io.resp.ma.st := misaligned && cmd_write io.resp.ma.inst := false.B // this is up to the pipeline to figure out io.resp.cacheable := (c_array & hits).orR io.resp.must_alloc := (must_alloc_array & hits).orR io.resp.prefetchable := (prefetchable_array & hits).orR && edge.manager.managers.forall(m => !m.supportsAcquireB || m.supportsHint).B io.resp.miss := do_refill || vsatp_mode_mismatch || tlb_miss || multipleHits io.resp.paddr := Cat(ppn, io.req.bits.vaddr(pgIdxBits-1, 0)) io.resp.size := io.req.bits.size io.resp.cmd := io.req.bits.cmd io.resp.gpa_is_pte := vstage1_en && r_gpa_is_pte io.resp.gpa := { val page = Mux(!vstage1_en, Cat(bad_gpa, vpn), r_gpa >> pgIdxBits) val offset = Mux(io.resp.gpa_is_pte, r_gpa(pgIdxBits-1, 0), io.req.bits.vaddr(pgIdxBits-1, 0)) Cat(page, offset) } io.ptw.req.valid := state === s_request io.ptw.req.bits.valid := !io.kill io.ptw.req.bits.bits.addr := r_refill_tag io.ptw.req.bits.bits.vstage1 := r_vstage1_en io.ptw.req.bits.bits.stage2 := r_stage2_en io.ptw.req.bits.bits.need_gpa := r_need_gpa if (usingVM) { when(io.ptw.req.fire && io.ptw.req.bits.valid) { r_gpa_valid := false.B r_gpa_vpn := r_refill_tag } val sfence = io.sfence.valid // this is [[s_ready]] // handle miss/hit at the first cycle. // if miss, request PTW(L2TLB). when (io.req.fire && tlb_miss) { state := s_request r_refill_tag := vpn r_need_gpa := tlb_hit_if_not_gpa_miss r_vstage1_en := vstage1_en r_stage2_en := stage2_en r_superpage_repl_addr := replacementEntry(superpage_entries, superpage_plru.way) r_sectored_repl_addr := replacementEntry(sectored_entries(memIdx), sectored_plru.way(memIdx)) r_sectored_hit.valid := sector_hits.orR r_sectored_hit.bits := OHToUInt(sector_hits) r_superpage_hit.valid := superpage_hits.orR r_superpage_hit.bits := OHToUInt(superpage_hits) } // Handle SFENCE.VMA when send request to PTW. // SFENCE.VMA io.ptw.req.ready kill // ? ? 1 // 0 0 0 // 0 1 0 -> s_wait // 1 0 0 -> s_wait_invalidate // 1 0 0 -> s_ready when (state === s_request) { // SFENCE.VMA will kill TLB entries based on rs1 and rs2. It will take 1 cycle. when (sfence) { state := s_ready } // here should be io.ptw.req.fire, but assert(io.ptw.req.ready === true.B) // fire -> s_wait when (io.ptw.req.ready) { state := Mux(sfence, s_wait_invalidate, s_wait) } // If CPU kills request(frontend.s2_redirect) when (io.kill) { state := s_ready } } // sfence in refill will results in invalidate when (state === s_wait && sfence) { state := s_wait_invalidate } // after CPU acquire response, go back to s_ready. when (io.ptw.resp.valid) { state := s_ready } // SFENCE processing logic. 
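    // Summary of the per-entry dispatch below (rs1/rs2 come from the SFENCE request,
    // hv/hg mark hypervisor fences):
    //   hg                  -> invalidate(hv || hg)    drop guest entries wholesale
    //   !hg && rs1          -> invalidateVPN(vpn, hv)  drop the mapping for one virtual address
    //   !hg && !rs1 && rs2  -> invalidateNonGlobal(hv) drop by ASID; global (G=1) pages survive
    //   !hg && !rs1 && !rs2 -> invalidate(hv)          drop everything in the selected world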
when (sfence) { assert(!io.sfence.bits.rs1 || (io.sfence.bits.addr >> pgIdxBits) === vpn) for (e <- all_real_entries) { val hv = usingHypervisor.B && io.sfence.bits.hv val hg = usingHypervisor.B && io.sfence.bits.hg when (!hg && io.sfence.bits.rs1) { e.invalidateVPN(vpn, hv) } .elsewhen (!hg && io.sfence.bits.rs2) { e.invalidateNonGlobal(hv) } .otherwise { e.invalidate(hv || hg) } } } when(io.req.fire && vsatp_mode_mismatch) { all_real_entries.foreach(_.invalidate(true.B)) v_entries_use_stage1 := vstage1_en } when (multipleHits || reset.asBool) { all_real_entries.foreach(_.invalidate()) } ccover(io.ptw.req.fire, "MISS", "TLB miss") ccover(io.ptw.req.valid && !io.ptw.req.ready, "PTW_STALL", "TLB miss, but PTW busy") ccover(state === s_wait_invalidate, "SFENCE_DURING_REFILL", "flush TLB during TLB refill") ccover(sfence && !io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_ALL", "flush TLB") ccover(sfence && !io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_ASID", "flush TLB ASID") ccover(sfence && io.sfence.bits.rs1 && !io.sfence.bits.rs2, "SFENCE_LINE", "flush TLB line") ccover(sfence && io.sfence.bits.rs1 && io.sfence.bits.rs2, "SFENCE_LINE_ASID", "flush TLB line/ASID") ccover(multipleHits, "MULTIPLE_HITS", "Two matching translations in TLB") } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = property.cover(cond, s"${if (instruction) "I" else "D"}TLB_$label", "MemorySystem;;" + desc) /** Decides which entry to be replaced * * If there is a invalid entry, replace it with priorityencoder; * if not, replace the alt entry * * @return mask for TLBEntry replacement */ def replacementEntry(set: Seq[TLBEntry], alt: UInt) = { val valids = set.map(_.valid.orR).asUInt Mux(valids.andR, alt, PriorityEncoder(~valids)) } } File TLBPermissions.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.rocket import chisel3._ import chisel3.util._ import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes, RegionType, AddressDecoder} import freechips.rocketchip.tilelink.TLManagerParameters case class TLBPermissions( homogeneous: Bool, // if false, the below are undefined r: Bool, // readable w: Bool, // writeable x: Bool, // executable c: Bool, // cacheable a: Bool, // arithmetic ops l: Bool) // logical ops object TLBPageLookup { private case class TLBFixedPermissions( e: Boolean, // get-/put-effects r: Boolean, // readable w: Boolean, // writeable x: Boolean, // executable c: Boolean, // cacheable a: Boolean, // arithmetic ops l: Boolean) { // logical ops val useful = r || w || x || c || a || l } private def groupRegions(managers: Seq[TLManagerParameters]): Map[TLBFixedPermissions, Seq[AddressSet]] = { val permissions = managers.map { m => (m.address, TLBFixedPermissions( e = Seq(RegionType.PUT_EFFECTS, RegionType.GET_EFFECTS) contains m.regionType, r = m.supportsGet || m.supportsAcquireB, // if cached, never uses Get w = m.supportsPutFull || m.supportsAcquireT, // if cached, never uses Put x = m.executable, c = m.supportsAcquireB, a = m.supportsArithmetic, l = m.supportsLogical)) } permissions .filter(_._2.useful) // get rid of no-permission devices .groupBy(_._2) // group by permission type .mapValues(seq => AddressSet.unify(seq.flatMap(_._1))) // coalesce same-permission regions .toMap } // Unmapped memory is considered to be inhomogeneous def apply(managers: Seq[TLManagerParameters], xLen: Int, cacheBlockBytes: Int, pageSize: BigInt, maxRequestBytes: Int): UInt => TLBPermissions = { require (isPow2(xLen) && xLen >= 8) require (isPow2(cacheBlockBytes) && cacheBlockBytes >= xLen/8) require (isPow2(pageSize) && pageSize >= cacheBlockBytes) val xferSizes = TransferSizes(cacheBlockBytes, cacheBlockBytes) val allSizes = TransferSizes(1, maxRequestBytes) val amoSizes = TransferSizes(4, xLen/8) val permissions = managers.foreach { m => require (!m.supportsGet || m.supportsGet .contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsGet} Get, but must support ${allSizes}") require (!m.supportsPutFull || m.supportsPutFull .contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsPutFull} PutFull, but must support ${allSizes}") require (!m.supportsPutPartial || m.supportsPutPartial.contains(allSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsPutPartial} PutPartial, but must support ${allSizes}") require (!m.supportsAcquireB || m.supportsAcquireB .contains(xferSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsAcquireB} AcquireB, but must support ${xferSizes}") require (!m.supportsAcquireT || m.supportsAcquireT .contains(xferSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsAcquireT} AcquireT, but must support ${xferSizes}") require (!m.supportsLogical || m.supportsLogical .contains(amoSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsLogical} Logical, but must support ${amoSizes}") require (!m.supportsArithmetic || m.supportsArithmetic.contains(amoSizes), s"Memory region '${m.name}' at ${m.address} only supports ${m.supportsArithmetic} Arithmetic, but must support ${amoSizes}") require (!(m.supportsAcquireB && m.supportsPutFull && !m.supportsAcquireT), s"Memory region '${m.name}' supports AcquireB (cached read) and PutFull (un-cached write) but not AcquireT (cached write)") } val 
grouped = groupRegions(managers) .mapValues(_.filter(_.alignment >= pageSize)) // discard any region that's not big enough def lowCostProperty(prop: TLBFixedPermissions => Boolean): UInt => Bool = { val (yesm, nom) = grouped.partition { case (k, eq) => prop(k) } val (yes, no) = (yesm.values.flatten.toList, nom.values.flatten.toList) // Find the minimal bits needed to distinguish between yes and no val decisionMask = AddressDecoder(Seq(yes, no)) def simplify(x: Seq[AddressSet]) = AddressSet.unify(x.map(_.widen(~decisionMask)).distinct) val (yesf, nof) = (simplify(yes), simplify(no)) if (yesf.size < no.size) { (x: UInt) => yesf.map(_.contains(x)).foldLeft(false.B)(_ || _) } else { (x: UInt) => !nof.map(_.contains(x)).foldLeft(false.B)(_ || _) } } // Derive simplified property circuits (don't care when !homo) val rfn = lowCostProperty(_.r) val wfn = lowCostProperty(_.w) val xfn = lowCostProperty(_.x) val cfn = lowCostProperty(_.c) val afn = lowCostProperty(_.a) val lfn = lowCostProperty(_.l) val homo = AddressSet.unify(grouped.values.flatten.toList) (x: UInt) => TLBPermissions( homogeneous = homo.map(_.contains(x)).foldLeft(false.B)(_ || _), r = rfn(x), w = wfn(x), x = xfn(x), c = cfn(x), a = afn(x), l = lfn(x)) } // Are all pageSize intervals of mapped regions homogeneous? def homogeneous(managers: Seq[TLManagerParameters], pageSize: BigInt): Boolean = { groupRegions(managers).values.forall(_.forall(_.alignment >= pageSize)) } } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = 
largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
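// Worked examples (illustrative only, following the definitions below):
//   AddressSet(0x200, 0xff):   contains(0x2ab) == true, contains(0x300) == false,
//                              alignment == 0x100, contiguous == true
//   AddressSet(0x1000, 0xf0f): contains(0x1105) == true, contains(0x1010) == false,
//                              alignment == 0x10, contiguous == false (the mask has a hole)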
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File PTW.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. 
package freechips.rocketchip.rocket import chisel3._ import chisel3.util.{Arbiter, Cat, Decoupled, Enum, Mux1H, OHToUInt, PopCount, PriorityEncoder, PriorityEncoderOH, RegEnable, UIntToOH, Valid, is, isPow2, log2Ceil, switch} import chisel3.withClock import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.subsystem.CacheBlockBytes import freechips.rocketchip.tile._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ import freechips.rocketchip.util.property import scala.collection.mutable.ListBuffer /** PTE request from TLB to PTW * * TLB send a PTE request to PTW when L1TLB miss */ class PTWReq(implicit p: Parameters) extends CoreBundle()(p) { val addr = UInt(vpnBits.W) val need_gpa = Bool() val vstage1 = Bool() val stage2 = Bool() } /** PTE info from L2TLB to TLB * * containing: target PTE, exceptions, two-satge tanslation info */ class PTWResp(implicit p: Parameters) extends CoreBundle()(p) { /** ptw access exception */ val ae_ptw = Bool() /** final access exception */ val ae_final = Bool() /** page fault */ val pf = Bool() /** guest page fault */ val gf = Bool() /** hypervisor read */ val hr = Bool() /** hypervisor write */ val hw = Bool() /** hypervisor execute */ val hx = Bool() /** PTE to refill L1TLB * * source: L2TLB */ val pte = new PTE /** pte pglevel */ val level = UInt(log2Ceil(pgLevels).W) /** fragmented_superpage support */ val fragmented_superpage = Bool() /** homogeneous for both pma and pmp */ val homogeneous = Bool() val gpa = Valid(UInt(vaddrBits.W)) val gpa_is_pte = Bool() } /** IO between TLB and PTW * * PTW receives : * - PTE request * - CSRs info * - pmp results from PMP(in TLB) */ class TLBPTWIO(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val req = Decoupled(Valid(new PTWReq)) val resp = Flipped(Valid(new PTWResp)) val ptbr = Input(new PTBR()) val hgatp = Input(new PTBR()) val vsatp = Input(new PTBR()) val status = Input(new MStatus()) val hstatus = Input(new HStatus()) val gstatus = Input(new MStatus()) val pmp = Input(Vec(nPMPs, new PMP)) val customCSRs = Flipped(coreParams.customCSRs) } /** PTW performance statistics */ class PTWPerfEvents extends Bundle { val l2miss = Bool() val l2hit = Bool() val pte_miss = Bool() val pte_hit = Bool() } /** Datapath IO between PTW and Core * * PTW receives CSRs info, pmp checks, sfence instruction info * * PTW sends its performance statistics to core */ class DatapathPTWIO(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val ptbr = Input(new PTBR()) val hgatp = Input(new PTBR()) val vsatp = Input(new PTBR()) val sfence = Flipped(Valid(new SFenceReq)) val status = Input(new MStatus()) val hstatus = Input(new HStatus()) val gstatus = Input(new MStatus()) val pmp = Input(Vec(nPMPs, new PMP)) val perf = Output(new PTWPerfEvents()) val customCSRs = Flipped(coreParams.customCSRs) /** enable clock generated by ptw */ val clock_enabled = Output(Bool()) } /** PTE template for transmission * * contains useful methods to check PTE attributes * @see RV-priv spec 4.3.1 for pgae table entry format */ class PTE(implicit p: Parameters) extends CoreBundle()(p) { val reserved_for_future = UInt(10.W) val ppn = UInt(44.W) val reserved_for_software = Bits(2.W) /** dirty bit */ val d = Bool() /** access bit */ val a = Bool() /** global mapping */ val g = Bool() /** user mode accessible */ val u = Bool() /** whether the page is executable */ val x = Bool() /** whether the page is writable */ val w = Bool() /** whether 
the page is readable */ val r = Bool() /** valid bit */ val v = Bool() /** return true if find a pointer to next level page table */ def table(dummy: Int = 0) = v && !r && !w && !x && !d && !a && !u && reserved_for_future === 0.U /** return true if find a leaf PTE */ def leaf(dummy: Int = 0) = v && (r || (x && !w)) && a /** user read */ def ur(dummy: Int = 0) = sr() && u /** user write*/ def uw(dummy: Int = 0) = sw() && u /** user execute */ def ux(dummy: Int = 0) = sx() && u /** supervisor read */ def sr(dummy: Int = 0) = leaf() && r /** supervisor write */ def sw(dummy: Int = 0) = leaf() && w && d /** supervisor execute */ def sx(dummy: Int = 0) = leaf() && x /** full permission: writable and executable in user mode */ def isFullPerm(dummy: Int = 0) = uw() && ux() } /** L2TLB PTE template * * contains tag bits * @param nSets number of sets in L2TLB * @see RV-priv spec 4.3.1 for page table entry format */ class L2TLBEntry(nSets: Int)(implicit p: Parameters) extends CoreBundle()(p) with HasCoreParameters { val idxBits = log2Ceil(nSets) val tagBits = maxSVAddrBits - pgIdxBits - idxBits + (if (usingHypervisor) 1 else 0) val tag = UInt(tagBits.W) val ppn = UInt(ppnBits.W) /** dirty bit */ val d = Bool() /** access bit */ val a = Bool() /** user mode accessible */ val u = Bool() /** whether the page is executable */ val x = Bool() /** whether the page is writable */ val w = Bool() /** whether the page is readable */ val r = Bool() } /** PTW contains L2TLB, and performs page table walk for high level TLB, and cache queries from L1 TLBs(I$, D$, RoCC) * * It performs hierarchy page table query to mem for the desired leaf PTE and cache them in l2tlb. * Besides leaf PTEs, it also caches non-leaf PTEs in pte_cache to accerlerate the process. * * ==Structure== * - l2tlb : for leaf PTEs * - set-associative (configurable with [[CoreParams.nL2TLBEntries]]and [[CoreParams.nL2TLBWays]])) * - PLRU * - pte_cache: for non-leaf PTEs * - set-associative * - LRU * - s2_pte_cache: for non-leaf PTEs in 2-stage translation * - set-associative * - PLRU * * l2tlb Pipeline: 3 stage * {{{ * stage 0 : read * stage 1 : decode * stage 2 : hit check * }}} * ==State Machine== * s_ready: ready to reveive request from TLB * s_req: request mem; pte_cache hit judge * s_wait1: deal with l2tlb error * s_wait2: final hit judge * s_wait3: receive mem response * s_fragment_superpage: for superpage PTE * * @note l2tlb hit happens in s_req or s_wait1 * @see RV-priv spec 4.3-4.6 for Virtual-Memory System * @see RV-priv spec 8.5 for Two-Stage Address Translation * @todo details in two-stage translation */ class PTW(n: Int)(implicit edge: TLEdgeOut, p: Parameters) extends CoreModule()(p) { val io = IO(new Bundle { /** to n TLB */ val requestor = Flipped(Vec(n, new TLBPTWIO)) /** to HellaCache */ val mem = new HellaCacheIO /** to Core * * contains CSRs info and performance statistics */ val dpath = new DatapathPTWIO }) val s_ready :: s_req :: s_wait1 :: s_dummy1 :: s_wait2 :: s_wait3 :: s_dummy2 :: s_fragment_superpage :: Nil = Enum(8) val state = RegInit(s_ready) val l2_refill_wire = Wire(Bool()) /** Arbiter to arbite request from n TLB */ val arb = Module(new Arbiter(Valid(new PTWReq), n)) // use TLB req as arbitor's input arb.io.in <> io.requestor.map(_.req) // receive req only when s_ready and not in refill arb.io.out.ready := (state === s_ready) && !l2_refill_wire val resp_valid = RegNext(VecInit(Seq.fill(io.requestor.size)(false.B))) val clock_en = state =/= s_ready || l2_refill_wire || arb.io.out.valid || io.dpath.sfence.valid || 
io.dpath.customCSRs.disableDCacheClockGate io.dpath.clock_enabled := usingVM.B && clock_en val gated_clock = if (!usingVM || !tileParams.dcache.get.clockGate) clock else ClockGate(clock, clock_en, "ptw_clock_gate") withClock (gated_clock) { // entering gated-clock domain val invalidated = Reg(Bool()) /** current PTE level * {{{ * 0 <= count <= pgLevel-1 * count = pgLevel - 1 : leaf PTE * count < pgLevel - 1 : non-leaf PTE * }}} */ val count = Reg(UInt(log2Ceil(pgLevels).W)) val resp_ae_ptw = Reg(Bool()) val resp_ae_final = Reg(Bool()) val resp_pf = Reg(Bool()) val resp_gf = Reg(Bool()) val resp_hr = Reg(Bool()) val resp_hw = Reg(Bool()) val resp_hx = Reg(Bool()) val resp_fragmented_superpage = Reg(Bool()) /** tlb request */ val r_req = Reg(new PTWReq) /** current selected way in arbitor */ val r_req_dest = Reg(Bits()) // to respond to L1TLB : l2_hit // to construct mem.req.addr val r_pte = Reg(new PTE) val r_hgatp = Reg(new PTBR) // 2-stage pageLevel val aux_count = Reg(UInt(log2Ceil(pgLevels).W)) /** pte for 2-stage translation */ val aux_pte = Reg(new PTE) val gpa_pgoff = Reg(UInt(pgIdxBits.W)) // only valid in resp_gf case val stage2 = Reg(Bool()) val stage2_final = Reg(Bool()) val satp = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp, io.dpath.ptbr) val r_hgatp_initial_count = pgLevels.U - minPgLevels.U - r_hgatp.additionalPgLevels /** 2-stage translation both enable */ val do_both_stages = r_req.vstage1 && r_req.stage2 val max_count = count max aux_count val vpn = Mux(r_req.vstage1 && stage2, aux_pte.ppn, r_req.addr) val mem_resp_valid = RegNext(io.mem.resp.valid) val mem_resp_data = RegNext(io.mem.resp.bits.data) io.mem.uncached_resp.map { resp => assert(!(resp.valid && io.mem.resp.valid)) resp.ready := true.B when (resp.valid) { mem_resp_valid := true.B mem_resp_data := resp.bits.data } } // construct pte from mem.resp val (pte, invalid_paddr, invalid_gpa) = { val tmp = mem_resp_data.asTypeOf(new PTE()) val res = WireDefault(tmp) res.ppn := Mux(do_both_stages && !stage2, tmp.ppn(vpnBits.min(tmp.ppn.getWidth)-1, 0), tmp.ppn(ppnBits-1, 0)) when (tmp.r || tmp.w || tmp.x) { // for superpage mappings, make sure PPN LSBs are zero for (i <- 0 until pgLevels-1) when (count <= i.U && tmp.ppn((pgLevels-1-i)*pgLevelBits-1, (pgLevels-2-i)*pgLevelBits) =/= 0.U) { res.v := false.B } } (res, Mux(do_both_stages && !stage2, (tmp.ppn >> vpnBits) =/= 0.U, (tmp.ppn >> ppnBits) =/= 0.U), do_both_stages && !stage2 && checkInvalidHypervisorGPA(r_hgatp, tmp.ppn)) } // find non-leaf PTE, need traverse val traverse = pte.table() && !invalid_paddr && !invalid_gpa && count < (pgLevels-1).U /** address send to mem for enquerry */ val pte_addr = if (!usingVM) 0.U else { val vpn_idxs = (0 until pgLevels).map { i => val width = pgLevelBits + (if (i <= pgLevels - minPgLevels) hypervisorExtraAddrBits else 0) (vpn >> (pgLevels - i - 1) * pgLevelBits)(width - 1, 0) } val mask = Mux(stage2 && count === r_hgatp_initial_count, ((1 << (hypervisorExtraAddrBits + pgLevelBits)) - 1).U, ((1 << pgLevelBits) - 1).U) val vpn_idx = vpn_idxs(count) & mask val raw_pte_addr = ((r_pte.ppn << pgLevelBits) | vpn_idx) << log2Ceil(xLen / 8) val size = if (usingHypervisor) vaddrBits else paddrBits //use r_pte.ppn as page table base address //use vpn slice as offset raw_pte_addr.apply(size.min(raw_pte_addr.getWidth) - 1, 0) } /** stage2_pte_cache input addr */ val stage2_pte_cache_addr = if (!usingHypervisor) 0.U else { val vpn_idxs = (0 until pgLevels - 1).map { i => (r_req.addr >> (pgLevels - i - 1) * pgLevelBits)(pgLevelBits - 1, 0) } 
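      // The stage-2 PTE cache lookup address (a guest-physical address) is built analogously
      // to pte_addr: append the vpn slice selected by aux_count to aux_pte.ppn, then scale by
      // the PTE size (xLen/8 bytes).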
val vpn_idx = vpn_idxs(aux_count) val raw_s2_pte_cache_addr = Cat(aux_pte.ppn, vpn_idx) << log2Ceil(xLen / 8) raw_s2_pte_cache_addr(vaddrBits.min(raw_s2_pte_cache_addr.getWidth) - 1, 0) } def makeFragmentedSuperpagePPN(ppn: UInt): Seq[UInt] = { (pgLevels-1 until 0 by -1).map(i => Cat(ppn >> (pgLevelBits*i), r_req.addr(((pgLevelBits*i) min vpnBits)-1, 0).padTo(pgLevelBits*i))) } /** PTECache caches non-leaf PTE * @param s2 true: 2-stage address translation */ def makePTECache(s2: Boolean): (Bool, UInt) = if (coreParams.nPTECacheEntries == 0) { (false.B, 0.U) } else { val plru = new PseudoLRU(coreParams.nPTECacheEntries) val valid = RegInit(0.U(coreParams.nPTECacheEntries.W)) val tags = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor) 1 + vaddrBits else paddrBits).W))) // not include full pte, only ppn val data = Reg(Vec(coreParams.nPTECacheEntries, UInt((if (usingHypervisor && s2) vpnBits else ppnBits).W))) val can_hit = if (s2) count === r_hgatp_initial_count && aux_count < (pgLevels-1).U && r_req.vstage1 && stage2 && !stage2_final else count < (pgLevels-1).U && Mux(r_req.vstage1, stage2, !r_req.stage2) val can_refill = if (s2) do_both_stages && !stage2 && !stage2_final else can_hit val tag = if (s2) Cat(true.B, stage2_pte_cache_addr.padTo(vaddrBits)) else Cat(r_req.vstage1, pte_addr.padTo(if (usingHypervisor) vaddrBits else paddrBits)) val hits = tags.map(_ === tag).asUInt & valid val hit = hits.orR && can_hit // refill with mem response when (mem_resp_valid && traverse && can_refill && !hits.orR && !invalidated) { val r = Mux(valid.andR, plru.way, PriorityEncoder(~valid)) valid := valid | UIntToOH(r) tags(r) := tag data(r) := pte.ppn plru.access(r) } // replace when (hit && state === s_req) { plru.access(OHToUInt(hits)) } when (io.dpath.sfence.valid && (!io.dpath.sfence.bits.rs1 || usingHypervisor.B && io.dpath.sfence.bits.hg)) { valid := 0.U } val lcount = if (s2) aux_count else count for (i <- 0 until pgLevels-1) { ccover(hit && state === s_req && lcount === i.U, s"PTE_CACHE_HIT_L$i", s"PTE cache hit, level $i") } (hit, Mux1H(hits, data)) } // generate pte_cache val (pte_cache_hit, pte_cache_data) = makePTECache(false) // generate pte_cache with 2-stage translation val (stage2_pte_cache_hit, stage2_pte_cache_data) = makePTECache(true) // pte_cache hit or 2-stage pte_cache hit val pte_hit = RegNext(false.B) io.dpath.perf.pte_miss := false.B io.dpath.perf.pte_hit := pte_hit && (state === s_req) && !io.dpath.perf.l2hit assert(!(io.dpath.perf.l2hit && (io.dpath.perf.pte_miss || io.dpath.perf.pte_hit)), "PTE Cache Hit/Miss Performance Monitor Events are lower priority than L2TLB Hit event") // l2_refill happens when find the leaf pte val l2_refill = RegNext(false.B) l2_refill_wire := l2_refill io.dpath.perf.l2miss := false.B io.dpath.perf.l2hit := false.B // l2tlb val (l2_hit, l2_error, l2_pte, l2_tlb_ram) = if (coreParams.nL2TLBEntries == 0) (false.B, false.B, WireDefault(0.U.asTypeOf(new PTE)), None) else { val code = new ParityCode require(isPow2(coreParams.nL2TLBEntries)) require(isPow2(coreParams.nL2TLBWays)) require(coreParams.nL2TLBEntries >= coreParams.nL2TLBWays) val nL2TLBSets = coreParams.nL2TLBEntries / coreParams.nL2TLBWays require(isPow2(nL2TLBSets)) val idxBits = log2Ceil(nL2TLBSets) val l2_plru = new SetAssocLRU(nL2TLBSets, coreParams.nL2TLBWays, "plru") val ram = DescribedSRAM( name = "l2_tlb_ram", desc = "L2 TLB", size = nL2TLBSets, data = Vec(coreParams.nL2TLBWays, UInt(code.width(new L2TLBEntry(nL2TLBSets).getWidth).W)) ) val g = 
Reg(Vec(coreParams.nL2TLBWays, UInt(nL2TLBSets.W))) val valid = RegInit(VecInit(Seq.fill(coreParams.nL2TLBWays)(0.U(nL2TLBSets.W)))) // use r_req to construct tag val (r_tag, r_idx) = Split(Cat(r_req.vstage1, r_req.addr(maxSVAddrBits-pgIdxBits-1, 0)), idxBits) /** the valid vec for the selected set(including n ways) */ val r_valid_vec = valid.map(_(r_idx)).asUInt val r_valid_vec_q = Reg(UInt(coreParams.nL2TLBWays.W)) val r_l2_plru_way = Reg(UInt(log2Ceil(coreParams.nL2TLBWays max 1).W)) r_valid_vec_q := r_valid_vec // replacement way r_l2_plru_way := (if (coreParams.nL2TLBWays > 1) l2_plru.way(r_idx) else 0.U) // refill with r_pte(leaf pte) when (l2_refill && !invalidated) { val entry = Wire(new L2TLBEntry(nL2TLBSets)) entry.ppn := r_pte.ppn entry.d := r_pte.d entry.a := r_pte.a entry.u := r_pte.u entry.x := r_pte.x entry.w := r_pte.w entry.r := r_pte.r entry.tag := r_tag // if all the way are valid, use plru to select one way to be replaced, // otherwise use PriorityEncoderOH to select one val wmask = if (coreParams.nL2TLBWays > 1) Mux(r_valid_vec_q.andR, UIntToOH(r_l2_plru_way, coreParams.nL2TLBWays), PriorityEncoderOH(~r_valid_vec_q)) else 1.U(1.W) ram.write(r_idx, VecInit(Seq.fill(coreParams.nL2TLBWays)(code.encode(entry.asUInt))), wmask.asBools) val mask = UIntToOH(r_idx) for (way <- 0 until coreParams.nL2TLBWays) { when (wmask(way)) { valid(way) := valid(way) | mask g(way) := Mux(r_pte.g, g(way) | mask, g(way) & ~mask) } } } // sfence happens when (io.dpath.sfence.valid) { val hg = usingHypervisor.B && io.dpath.sfence.bits.hg for (way <- 0 until coreParams.nL2TLBWays) { valid(way) := Mux(!hg && io.dpath.sfence.bits.rs1, valid(way) & ~UIntToOH(io.dpath.sfence.bits.addr(idxBits+pgIdxBits-1, pgIdxBits)), Mux(!hg && io.dpath.sfence.bits.rs2, valid(way) & g(way), 0.U)) } } val s0_valid = !l2_refill && arb.io.out.fire val s0_suitable = arb.io.out.bits.bits.vstage1 === arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.need_gpa val s1_valid = RegNext(s0_valid && s0_suitable && arb.io.out.bits.valid) val s2_valid = RegNext(s1_valid) // read from tlb idx val s1_rdata = ram.read(arb.io.out.bits.bits.addr(idxBits-1, 0), s0_valid) val s2_rdata = s1_rdata.map(s1_rdway => code.decode(RegEnable(s1_rdway, s1_valid))) val s2_valid_vec = RegEnable(r_valid_vec, s1_valid) val s2_g_vec = RegEnable(VecInit(g.map(_(r_idx))), s1_valid) val s2_error = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && s2_rdata(way).error).orR when (s2_valid && s2_error) { valid.foreach { _ := 0.U }} // decode val s2_entry_vec = s2_rdata.map(_.uncorrected.asTypeOf(new L2TLBEntry(nL2TLBSets))) val s2_hit_vec = (0 until coreParams.nL2TLBWays).map(way => s2_valid_vec(way) && (r_tag === s2_entry_vec(way).tag)) val s2_hit = s2_valid && s2_hit_vec.orR io.dpath.perf.l2miss := s2_valid && !(s2_hit_vec.orR) io.dpath.perf.l2hit := s2_hit when (s2_hit) { l2_plru.access(r_idx, OHToUInt(s2_hit_vec)) assert((PopCount(s2_hit_vec) === 1.U) || s2_error, "L2 TLB multi-hit") } val s2_pte = Wire(new PTE) val s2_hit_entry = Mux1H(s2_hit_vec, s2_entry_vec) s2_pte.ppn := s2_hit_entry.ppn s2_pte.d := s2_hit_entry.d s2_pte.a := s2_hit_entry.a s2_pte.g := Mux1H(s2_hit_vec, s2_g_vec) s2_pte.u := s2_hit_entry.u s2_pte.x := s2_hit_entry.x s2_pte.w := s2_hit_entry.w s2_pte.r := s2_hit_entry.r s2_pte.v := true.B s2_pte.reserved_for_future := 0.U s2_pte.reserved_for_software := 0.U for (way <- 0 until coreParams.nL2TLBWays) { ccover(s2_hit && s2_hit_vec(way), s"L2_TLB_HIT_WAY$way", s"L2 TLB hit way$way") } (s2_hit, s2_error, s2_pte, 
Some(ram)) } // if SFENCE occurs during walk, don't refill PTE cache or L2 TLB until next walk invalidated := io.dpath.sfence.valid || (invalidated && state =/= s_ready) // mem request io.mem.keep_clock_enabled := false.B io.mem.req.valid := state === s_req || state === s_dummy1 io.mem.req.bits.phys := true.B io.mem.req.bits.cmd := M_XRD io.mem.req.bits.size := log2Ceil(xLen/8).U io.mem.req.bits.signed := false.B io.mem.req.bits.addr := pte_addr io.mem.req.bits.idx.foreach(_ := pte_addr) io.mem.req.bits.dprv := PRV.S.U // PTW accesses are S-mode by definition io.mem.req.bits.dv := do_both_stages && !stage2 io.mem.req.bits.tag := DontCare io.mem.req.bits.no_resp := false.B io.mem.req.bits.no_alloc := DontCare io.mem.req.bits.no_xcpt := DontCare io.mem.req.bits.data := DontCare io.mem.req.bits.mask := DontCare io.mem.s1_kill := l2_hit || (state =/= s_wait1) || resp_gf io.mem.s1_data := DontCare io.mem.s2_kill := false.B val pageGranularityPMPs = pmpGranularity >= (1 << pgIdxBits) require(!usingHypervisor || pageGranularityPMPs, s"hypervisor requires pmpGranularity >= ${1<<pgIdxBits}") val pmaPgLevelHomogeneous = (0 until pgLevels) map { i => val pgSize = BigInt(1) << (pgIdxBits + ((pgLevels - 1 - i) * pgLevelBits)) if (pageGranularityPMPs && i == pgLevels - 1) { require(TLBPageLookup.homogeneous(edge.manager.managers, pgSize), s"All memory regions must be $pgSize-byte aligned") true.B } else { TLBPageLookup(edge.manager.managers, xLen, p(CacheBlockBytes), pgSize, xLen/8)(r_pte.ppn << pgIdxBits).homogeneous } } val pmaHomogeneous = pmaPgLevelHomogeneous(count) val pmpHomogeneous = new PMPHomogeneityChecker(io.dpath.pmp).apply(r_pte.ppn << pgIdxBits, count) val homogeneous = pmaHomogeneous && pmpHomogeneous // response to tlb for (i <- 0 until io.requestor.size) { io.requestor(i).resp.valid := resp_valid(i) io.requestor(i).resp.bits.ae_ptw := resp_ae_ptw io.requestor(i).resp.bits.ae_final := resp_ae_final io.requestor(i).resp.bits.pf := resp_pf io.requestor(i).resp.bits.gf := resp_gf io.requestor(i).resp.bits.hr := resp_hr io.requestor(i).resp.bits.hw := resp_hw io.requestor(i).resp.bits.hx := resp_hx io.requestor(i).resp.bits.pte := r_pte io.requestor(i).resp.bits.level := max_count io.requestor(i).resp.bits.homogeneous := homogeneous || pageGranularityPMPs.B io.requestor(i).resp.bits.fragmented_superpage := resp_fragmented_superpage && pageGranularityPMPs.B io.requestor(i).resp.bits.gpa.valid := r_req.need_gpa io.requestor(i).resp.bits.gpa.bits := Cat(Mux(!stage2_final || !r_req.vstage1 || aux_count === (pgLevels - 1).U, aux_pte.ppn, makeFragmentedSuperpagePPN(aux_pte.ppn)(aux_count)), gpa_pgoff) io.requestor(i).resp.bits.gpa_is_pte := !stage2_final io.requestor(i).ptbr := io.dpath.ptbr io.requestor(i).hgatp := io.dpath.hgatp io.requestor(i).vsatp := io.dpath.vsatp io.requestor(i).customCSRs <> io.dpath.customCSRs io.requestor(i).status := io.dpath.status io.requestor(i).hstatus := io.dpath.hstatus io.requestor(i).gstatus := io.dpath.gstatus io.requestor(i).pmp := io.dpath.pmp } // control state machine val next_state = WireDefault(state) state := OptimizationBarrier(next_state) val do_switch = WireDefault(false.B) switch (state) { is (s_ready) { when (arb.io.out.fire) { val satp_initial_count = pgLevels.U - minPgLevels.U - satp.additionalPgLevels val vsatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.vsatp.additionalPgLevels val hgatp_initial_count = pgLevels.U - minPgLevels.U - io.dpath.hgatp.additionalPgLevels val aux_ppn = Mux(arb.io.out.bits.bits.vstage1, io.dpath.vsatp.ppn, 
                          arb.io.out.bits.bits.addr)

        r_req := arb.io.out.bits.bits
        r_req_dest := arb.io.chosen
        next_state := Mux(arb.io.out.bits.valid, s_req, s_ready)
        stage2       := arb.io.out.bits.bits.stage2
        stage2_final := arb.io.out.bits.bits.stage2 && !arb.io.out.bits.bits.vstage1
        count     := Mux(arb.io.out.bits.bits.stage2, hgatp_initial_count, satp_initial_count)
        aux_count := Mux(arb.io.out.bits.bits.vstage1, vsatp_initial_count, 0.U)
        aux_pte.ppn := aux_ppn
        aux_pte.reserved_for_future := 0.U
        resp_ae_ptw := false.B
        resp_ae_final := false.B
        resp_pf := false.B
        resp_gf := checkInvalidHypervisorGPA(io.dpath.hgatp, aux_ppn) && arb.io.out.bits.bits.stage2
        resp_hr := true.B
        resp_hw := true.B
        resp_hx := true.B
        resp_fragmented_superpage := false.B
        r_hgatp := io.dpath.hgatp

        assert(!arb.io.out.bits.bits.need_gpa || arb.io.out.bits.bits.stage2)
      }
    }
    is (s_req) {
      when (stage2 && count === r_hgatp_initial_count) {
        gpa_pgoff := Mux(aux_count === (pgLevels-1).U, r_req.addr << (xLen/8).log2, stage2_pte_cache_addr)
      }
      // pte_cache hit
      when (stage2_pte_cache_hit) {
        aux_count := aux_count + 1.U
        aux_pte.ppn := stage2_pte_cache_data
        aux_pte.reserved_for_future := 0.U
        pte_hit := true.B
      }.elsewhen (pte_cache_hit) {
        count := count + 1.U
        pte_hit := true.B
      }.otherwise {
        next_state := Mux(io.mem.req.ready, s_wait1, s_req)
      }
      when (resp_gf) {
        next_state := s_ready
        resp_valid(r_req_dest) := true.B
      }
    }
    is (s_wait1) {
      // This Mux is for the l2_error case; the l2_hit && !l2_error case is overridden below
      next_state := Mux(l2_hit, s_req, s_wait2)
    }
    is (s_wait2) {
      next_state := s_wait3
      io.dpath.perf.pte_miss := count < (pgLevels-1).U
      when (io.mem.s2_xcpt.ae.ld) {
        resp_ae_ptw := true.B
        next_state := s_ready
        resp_valid(r_req_dest) := true.B
      }
    }
    is (s_fragment_superpage) {
      next_state := s_ready
      resp_valid(r_req_dest) := true.B
      when (!homogeneous) {
        count := (pgLevels-1).U
        resp_fragmented_superpage := true.B
      }
      when (do_both_stages) {
        resp_fragmented_superpage := true.B
      }
    }
  }

  val merged_pte = {
    val superpage_masks = (0 until pgLevels).map(i => ((BigInt(1) << pte.ppn.getWidth) - (BigInt(1) << (pgLevels-1-i)*pgLevelBits)).U)
    val superpage_mask = superpage_masks(Mux(stage2_final, max_count, (pgLevels-1).U))
    val stage1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), aux_pte.ppn((pgLevels-i-1)*pgLevelBits-1,0))) :+ pte.ppn
    val stage1_ppn = stage1_ppns(count)
    makePTE(stage1_ppn & superpage_mask, aux_pte)
  }

  r_pte := OptimizationBarrier(
    // L2 TLB hit -> found a leaf PTE (l2_pte); respond to the L1 TLB
    Mux(l2_hit && !l2_error && !resp_gf, l2_pte,
    // S2 PTE cache hit -> proceed to the next level of walking, update the r_pte with hgatp
    Mux(state === s_req && stage2_pte_cache_hit, makeHypervisorRootPTE(r_hgatp, stage2_pte_cache_data, l2_pte),
    // PTE cache hit -> found a non-leaf PTE (pte_cache); continue to request mem
    Mux(state === s_req && pte_cache_hit, makePTE(pte_cache_data, l2_pte),
    // 2-stage translation
    Mux(do_switch, makeHypervisorRootPTE(r_hgatp, pte.ppn, r_pte),
    // when mem responds, store mem.resp.pte
    Mux(mem_resp_valid, Mux(!traverse && r_req.vstage1 && stage2, merged_pte, pte),
    // fragment_superpage
    Mux(state === s_fragment_superpage && !homogeneous && count =/= (pgLevels - 1).U, makePTE(makeFragmentedSuperpagePPN(r_pte.ppn)(count), r_pte),
    // when a TLB request arrives -> request mem, using the root address in satp (or vsatp/hgatp)
    Mux(arb.io.out.fire, Mux(arb.io.out.bits.bits.stage2, makeHypervisorRootPTE(io.dpath.hgatp, io.dpath.vsatp.ppn, r_pte), makePTE(satp.ppn, r_pte)),
    r_pte))))))))

  when (l2_hit && !l2_error && !resp_gf) {
    assert(state === s_req || state === s_wait1)
    next_state := s_ready
    resp_valid(r_req_dest) := true.B
    count := (pgLevels-1).U
  }

  when (mem_resp_valid) {
    assert(state === s_wait3)
    next_state := s_req
    when (traverse) {
      when (do_both_stages && !stage2) { do_switch := true.B }
      count := count + 1.U
    }.otherwise {
      val gf = (stage2 && !stage2_final && !pte.ur()) || (pte.leaf() && pte.reserved_for_future === 0.U && invalid_gpa)
      val ae = pte.v && invalid_paddr
      val pf = pte.v && pte.reserved_for_future =/= 0.U
      val success = pte.v && !ae && !pf && !gf

      when (do_both_stages && !stage2_final && success) {
        when (stage2) {
          stage2 := false.B
          count := aux_count
        }.otherwise {
          stage2_final := true.B
          do_switch := true.B
        }
      }.otherwise {
        // found a leaf PTE; start the L2 TLB refill
        l2_refill := success && count === (pgLevels-1).U && !r_req.need_gpa &&
          (!r_req.vstage1 && !r_req.stage2 ||
           do_both_stages && aux_count === (pgLevels-1).U && pte.isFullPerm())
        count := max_count

        when (pageGranularityPMPs.B && !(count === (pgLevels-1).U && (!do_both_stages || aux_count === (pgLevels-1).U))) {
          next_state := s_fragment_superpage
        }.otherwise {
          next_state := s_ready
          resp_valid(r_req_dest) := true.B
        }

        resp_ae_ptw := ae && count < (pgLevels-1).U && pte.table()
        resp_ae_final := ae && pte.leaf()
        resp_pf := pf && !stage2
        resp_gf := gf || (pf && stage2)
        resp_hr := !stage2 || (!pf && !gf && pte.ur())
        resp_hw := !stage2 || (!pf && !gf && pte.uw())
        resp_hx := !stage2 || (!pf && !gf && pte.ux())
      }
    }
  }
  when (io.mem.s2_nack) {
    assert(state === s_wait2)
    next_state := s_req
  }

  when (do_switch) {
    aux_count := Mux(traverse, count + 1.U, count)
    count := r_hgatp_initial_count
    aux_pte := Mux(traverse, pte, {
      val s1_ppns = (0 until pgLevels-1).map(i => Cat(pte.ppn(pte.ppn.getWidth-1, (pgLevels-i-1)*pgLevelBits), r_req.addr(((pgLevels-i-1)*pgLevelBits min vpnBits)-1,0).padTo((pgLevels-i-1)*pgLevelBits))) :+ pte.ppn
      makePTE(s1_ppns(count), pte)
    })
    stage2 := true.B
  }

  for (i <- 0 until pgLevels) {
    val leaf = mem_resp_valid && !traverse && count === i.U
    ccover(leaf && pte.v && !invalid_paddr && !invalid_gpa && pte.reserved_for_future === 0.U, s"L$i", s"successful page-table access, level $i")
    ccover(leaf && pte.v && invalid_paddr, s"L${i}_BAD_PPN_MSB", s"PPN too large, level $i")
    ccover(leaf && pte.v && invalid_gpa, s"L${i}_BAD_GPA_MSB", s"GPA too large, level $i")
    ccover(leaf && pte.v && pte.reserved_for_future =/= 0.U, s"L${i}_BAD_RSV_MSB", s"reserved MSBs set, level $i")
    ccover(leaf && !mem_resp_data(0), s"L${i}_INVALID_PTE", s"page not present, level $i")
    if (i != pgLevels-1)
      ccover(leaf && !pte.v && mem_resp_data(0), s"L${i}_BAD_PPN_LSB", s"PPN LSBs not zero, level $i")
  }
  ccover(mem_resp_valid && count === (pgLevels-1).U && pte.table(), s"TOO_DEEP", s"page table too deep")
  ccover(io.mem.s2_nack, "NACK", "D$ nacked page-table access")
  ccover(state === s_wait2 && io.mem.s2_xcpt.ae.ld, "AE", "access exception while walking page table")
  } // leaving gated-clock domain

  private def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) =
    if (usingVM) property.cover(cond, s"PTW_$label", "MemorySystem;;" + desc)

  /** Replace PTE.ppn with ppn */
  private def makePTE(ppn: UInt, default: PTE) = {
    val pte = WireDefault(default)
    pte.ppn := ppn
    pte
  }

  /** use hgatp and vpn to construct a new ppn */
  private def makeHypervisorRootPTE(hgatp: PTBR, vpn: UInt, default: PTE) = {
    val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
    val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> (pgLevels-i)*pgLevelBits))
    val lsbs =
      WireDefault(UInt(maxHypervisorExtraAddrBits.W), idxs(count))
    val pte = WireDefault(default)
    pte.ppn := Cat(hgatp.ppn >> maxHypervisorExtraAddrBits, lsbs)
    pte
  }

  /** use hgatp and vpn to check for gpa out of range */
  private def checkInvalidHypervisorGPA(hgatp: PTBR, vpn: UInt) = {
    val count = pgLevels.U - minPgLevels.U - hgatp.additionalPgLevels
    val idxs = (0 to pgLevels-minPgLevels).map(i => (vpn >> ((pgLevels-i)*pgLevelBits)+maxHypervisorExtraAddrBits))
    idxs.extract(count) =/= 0.U
  }
}

/** Mix-ins for constructing tiles that might have a PTW */
trait CanHavePTW extends HasTileParameters with HasHellaCache { this: BaseTile =>
  val module: CanHavePTWModule
  var nPTWPorts = 1
  nDCachePorts += usingPTW.toInt
}

trait CanHavePTWModule extends HasHellaCacheModule {
  val outer: CanHavePTW
  val ptwPorts = ListBuffer(outer.dcache.module.io.ptw)
  val ptw = Module(new PTW(outer.nPTWPorts)(outer.dcache.node.edges.out(0), outer.p))
  ptw.io.mem <> DontCare
  if (outer.usingPTW) {
    dcachePorts += ptw.io.mem
  }
}
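As a side note on makeHypervisorRootPTE above: a minimal, plain-Scala sketch of its index arithmetic, assuming an Sv39x4-style configuration (pgLevels = 3, minPgLevels = 3, pgLevelBits = 9, maxHypervisorExtraAddrBits = 2). The object name, the constant values, and the explicit masking are illustrative assumptions for this sketch only; they are not part of the Chisel sources above or of the generated Verilog below.

object HgatpRootIndexSketch extends App {
  // Assumed Sv39x4-style parameters (hypothetical values for this sketch only)
  val pgLevels = 3
  val minPgLevels = 3
  val pgLevelBits = 9
  val maxHypervisorExtraAddrBits = 2 // extra GPA bits that index the widened root table

  // Mirrors the idxs/lsbs computation: shift the VPN so that only the bits above the
  // normally addressable range remain, then keep the low maxHypervisorExtraAddrBits.
  def rootIndexLsbs(vpn: BigInt, additionalPgLevels: Int): BigInt = {
    val count = pgLevels - minPgLevels - additionalPgLevels
    val idxs = (0 to pgLevels - minPgLevels).map(i => vpn >> ((pgLevels - i) * pgLevelBits))
    idxs(count) & ((BigInt(1) << maxHypervisorExtraAddrBits) - 1)
  }

  // Example: with a 29-bit guest VPN of all ones, the top two bits (both set) select
  // root-table page 3, so this prints 3.
  println(rootIndexLsbs(BigInt("1fffffff", 16), additionalPgLevels = 0))
}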
module DTLB( // @[TLB.scala:318:7] input clock, // @[TLB.scala:318:7] input reset, // @[TLB.scala:318:7] output io_req_ready, // @[TLB.scala:320:14] input io_req_valid, // @[TLB.scala:320:14] input [48:0] io_req_bits_vaddr, // @[TLB.scala:320:14] input io_req_bits_passthrough, // @[TLB.scala:320:14] input [1:0] io_req_bits_size, // @[TLB.scala:320:14] input [4:0] io_req_bits_cmd, // @[TLB.scala:320:14] input [1:0] io_req_bits_prv, // @[TLB.scala:320:14] input io_req_bits_v, // @[TLB.scala:320:14] output io_resp_miss, // @[TLB.scala:320:14] output [31:0] io_resp_paddr, // @[TLB.scala:320:14] output [48:0] io_resp_gpa, // @[TLB.scala:320:14] output io_resp_pf_ld, // @[TLB.scala:320:14] output io_resp_pf_st, // @[TLB.scala:320:14] output io_resp_pf_inst, // @[TLB.scala:320:14] output io_resp_ae_ld, // @[TLB.scala:320:14] output io_resp_ae_st, // @[TLB.scala:320:14] output io_resp_ae_inst, // @[TLB.scala:320:14] output io_resp_ma_ld, // @[TLB.scala:320:14] output io_resp_ma_st, // @[TLB.scala:320:14] output io_resp_cacheable, // @[TLB.scala:320:14] output io_resp_must_alloc, // @[TLB.scala:320:14] output io_resp_prefetchable, // @[TLB.scala:320:14] output [1:0] io_resp_size, // @[TLB.scala:320:14] output [4:0] io_resp_cmd, // @[TLB.scala:320:14] input io_sfence_valid, // @[TLB.scala:320:14] input io_sfence_bits_rs1, // @[TLB.scala:320:14] input io_sfence_bits_rs2, // @[TLB.scala:320:14] input [47:0] io_sfence_bits_addr, // @[TLB.scala:320:14] input io_sfence_bits_asid, // @[TLB.scala:320:14] input io_sfence_bits_hv, // @[TLB.scala:320:14] input io_sfence_bits_hg, // @[TLB.scala:320:14] input io_ptw_req_ready, // @[TLB.scala:320:14] output io_ptw_req_valid, // @[TLB.scala:320:14] output [35:0] io_ptw_req_bits_bits_addr, // @[TLB.scala:320:14] output io_ptw_req_bits_bits_need_gpa, // @[TLB.scala:320:14] input io_ptw_resp_valid, // @[TLB.scala:320:14] input io_ptw_resp_bits_ae_ptw, // @[TLB.scala:320:14] input io_ptw_resp_bits_ae_final, // @[TLB.scala:320:14] input io_ptw_resp_bits_pf, // @[TLB.scala:320:14] input io_ptw_resp_bits_gf, // @[TLB.scala:320:14] input io_ptw_resp_bits_hr, // @[TLB.scala:320:14] input io_ptw_resp_bits_hw, // @[TLB.scala:320:14] input io_ptw_resp_bits_hx, // @[TLB.scala:320:14] input [9:0] io_ptw_resp_bits_pte_reserved_for_future, // @[TLB.scala:320:14] input [43:0] io_ptw_resp_bits_pte_ppn, // @[TLB.scala:320:14] input [1:0] io_ptw_resp_bits_pte_reserved_for_software, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_d, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_a, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_g, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_u, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_x, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_w, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_r, // @[TLB.scala:320:14] input io_ptw_resp_bits_pte_v, // @[TLB.scala:320:14] input [1:0] io_ptw_resp_bits_level, // @[TLB.scala:320:14] input io_ptw_resp_bits_homogeneous, // @[TLB.scala:320:14] input io_ptw_resp_bits_gpa_valid, // @[TLB.scala:320:14] input [47:0] io_ptw_resp_bits_gpa_bits, // @[TLB.scala:320:14] input io_ptw_resp_bits_gpa_is_pte, // @[TLB.scala:320:14] input [3:0] io_ptw_ptbr_mode, // @[TLB.scala:320:14] input [43:0] io_ptw_ptbr_ppn, // @[TLB.scala:320:14] input io_ptw_status_debug, // @[TLB.scala:320:14] input io_ptw_status_cease, // @[TLB.scala:320:14] input io_ptw_status_wfi, // @[TLB.scala:320:14] input [31:0] io_ptw_status_isa, // @[TLB.scala:320:14] input [1:0] io_ptw_status_dprv, // @[TLB.scala:320:14] input 
io_ptw_status_dv, // @[TLB.scala:320:14] input [1:0] io_ptw_status_prv, // @[TLB.scala:320:14] input io_ptw_status_v, // @[TLB.scala:320:14] input io_ptw_status_sd, // @[TLB.scala:320:14] input io_ptw_status_mpv, // @[TLB.scala:320:14] input io_ptw_status_gva, // @[TLB.scala:320:14] input io_ptw_status_tsr, // @[TLB.scala:320:14] input io_ptw_status_tw, // @[TLB.scala:320:14] input io_ptw_status_tvm, // @[TLB.scala:320:14] input io_ptw_status_mxr, // @[TLB.scala:320:14] input io_ptw_status_sum, // @[TLB.scala:320:14] input io_ptw_status_mprv, // @[TLB.scala:320:14] input [1:0] io_ptw_status_fs, // @[TLB.scala:320:14] input [1:0] io_ptw_status_mpp, // @[TLB.scala:320:14] input io_ptw_status_spp, // @[TLB.scala:320:14] input io_ptw_status_mpie, // @[TLB.scala:320:14] input io_ptw_status_spie, // @[TLB.scala:320:14] input io_ptw_status_mie, // @[TLB.scala:320:14] input io_ptw_status_sie, // @[TLB.scala:320:14] input io_ptw_hstatus_spvp, // @[TLB.scala:320:14] input io_ptw_hstatus_spv, // @[TLB.scala:320:14] input io_ptw_hstatus_gva, // @[TLB.scala:320:14] input io_ptw_gstatus_debug, // @[TLB.scala:320:14] input io_ptw_gstatus_cease, // @[TLB.scala:320:14] input io_ptw_gstatus_wfi, // @[TLB.scala:320:14] input [31:0] io_ptw_gstatus_isa, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_dprv, // @[TLB.scala:320:14] input io_ptw_gstatus_dv, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_prv, // @[TLB.scala:320:14] input io_ptw_gstatus_v, // @[TLB.scala:320:14] input io_ptw_gstatus_sd, // @[TLB.scala:320:14] input [22:0] io_ptw_gstatus_zero2, // @[TLB.scala:320:14] input io_ptw_gstatus_mpv, // @[TLB.scala:320:14] input io_ptw_gstatus_gva, // @[TLB.scala:320:14] input io_ptw_gstatus_mbe, // @[TLB.scala:320:14] input io_ptw_gstatus_sbe, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_sxl, // @[TLB.scala:320:14] input [7:0] io_ptw_gstatus_zero1, // @[TLB.scala:320:14] input io_ptw_gstatus_tsr, // @[TLB.scala:320:14] input io_ptw_gstatus_tw, // @[TLB.scala:320:14] input io_ptw_gstatus_tvm, // @[TLB.scala:320:14] input io_ptw_gstatus_mxr, // @[TLB.scala:320:14] input io_ptw_gstatus_sum, // @[TLB.scala:320:14] input io_ptw_gstatus_mprv, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_fs, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_mpp, // @[TLB.scala:320:14] input [1:0] io_ptw_gstatus_vs, // @[TLB.scala:320:14] input io_ptw_gstatus_spp, // @[TLB.scala:320:14] input io_ptw_gstatus_mpie, // @[TLB.scala:320:14] input io_ptw_gstatus_ube, // @[TLB.scala:320:14] input io_ptw_gstatus_spie, // @[TLB.scala:320:14] input io_ptw_gstatus_upie, // @[TLB.scala:320:14] input io_ptw_gstatus_mie, // @[TLB.scala:320:14] input io_ptw_gstatus_hie, // @[TLB.scala:320:14] input io_ptw_gstatus_sie, // @[TLB.scala:320:14] input io_ptw_gstatus_uie, // @[TLB.scala:320:14] input io_ptw_pmp_0_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_0_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_0_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_0_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_0_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_0_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_0_mask, // @[TLB.scala:320:14] input io_ptw_pmp_1_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_1_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_1_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_1_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_1_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_1_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_1_mask, // @[TLB.scala:320:14] input io_ptw_pmp_2_cfg_l, // 
@[TLB.scala:320:14] input [1:0] io_ptw_pmp_2_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_2_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_2_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_2_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_2_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_2_mask, // @[TLB.scala:320:14] input io_ptw_pmp_3_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_3_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_3_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_3_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_3_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_3_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_3_mask, // @[TLB.scala:320:14] input io_ptw_pmp_4_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_4_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_4_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_4_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_4_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_4_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_4_mask, // @[TLB.scala:320:14] input io_ptw_pmp_5_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_5_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_5_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_5_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_5_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_5_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_5_mask, // @[TLB.scala:320:14] input io_ptw_pmp_6_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_6_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_6_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_6_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_6_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_6_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_6_mask, // @[TLB.scala:320:14] input io_ptw_pmp_7_cfg_l, // @[TLB.scala:320:14] input [1:0] io_ptw_pmp_7_cfg_a, // @[TLB.scala:320:14] input io_ptw_pmp_7_cfg_x, // @[TLB.scala:320:14] input io_ptw_pmp_7_cfg_w, // @[TLB.scala:320:14] input io_ptw_pmp_7_cfg_r, // @[TLB.scala:320:14] input [29:0] io_ptw_pmp_7_addr, // @[TLB.scala:320:14] input [31:0] io_ptw_pmp_7_mask, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_0_ren, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_0_wen, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_0_wdata, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_0_value, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_1_ren, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_1_wen, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_1_wdata, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_1_value, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_2_ren, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_2_wen, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_2_wdata, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_2_value, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_3_ren, // @[TLB.scala:320:14] input io_ptw_customCSRs_csrs_3_wen, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_3_wdata, // @[TLB.scala:320:14] input [63:0] io_ptw_customCSRs_csrs_3_value // @[TLB.scala:320:14] ); wire [19:0] _entries_barrier_12_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_12_io_y_u; // @[package.scala:267:25] wire _entries_barrier_12_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_12_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_12_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_12_io_y_pf; // @[package.scala:267:25] wire 
_entries_barrier_12_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_12_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_12_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_12_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_12_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_12_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_12_io_y_hr; // @[package.scala:267:25] wire [19:0] _entries_barrier_11_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_11_io_y_u; // @[package.scala:267:25] wire _entries_barrier_11_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_11_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_11_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_11_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_11_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_11_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_11_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_11_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_11_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_11_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_11_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_11_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_11_io_y_px; // @[package.scala:267:25] wire _entries_barrier_11_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_11_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_11_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_11_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_11_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_11_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_10_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_10_io_y_u; // @[package.scala:267:25] wire _entries_barrier_10_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_10_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_10_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_10_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_10_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_10_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_10_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_10_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_10_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_10_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_10_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_10_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_10_io_y_px; // @[package.scala:267:25] wire _entries_barrier_10_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_10_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_10_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_10_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_10_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_10_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_9_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_9_io_y_u; // @[package.scala:267:25] wire _entries_barrier_9_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_9_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_9_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_9_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_9_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_9_io_y_sw; // @[package.scala:267:25] wire 
_entries_barrier_9_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_9_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_9_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_9_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_9_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_9_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_9_io_y_px; // @[package.scala:267:25] wire _entries_barrier_9_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_9_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_9_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_9_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_9_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_9_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_8_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_8_io_y_u; // @[package.scala:267:25] wire _entries_barrier_8_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_8_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_8_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_8_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_8_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_8_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_8_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_8_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_8_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_8_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_8_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_8_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_8_io_y_px; // @[package.scala:267:25] wire _entries_barrier_8_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_8_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_8_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_8_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_8_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_8_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_7_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_7_io_y_u; // @[package.scala:267:25] wire _entries_barrier_7_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_7_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_7_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_7_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_7_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_7_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_7_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_7_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_7_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_7_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_7_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_7_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_7_io_y_px; // @[package.scala:267:25] wire _entries_barrier_7_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_7_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_7_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_7_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_7_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_7_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_6_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_6_io_y_u; // @[package.scala:267:25] wire _entries_barrier_6_io_y_ae_ptw; // @[package.scala:267:25] wire 
_entries_barrier_6_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_6_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_6_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_6_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_6_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_6_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_6_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_6_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_6_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_6_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_6_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_6_io_y_px; // @[package.scala:267:25] wire _entries_barrier_6_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_6_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_6_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_6_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_6_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_6_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_5_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_5_io_y_u; // @[package.scala:267:25] wire _entries_barrier_5_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_5_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_5_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_5_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_5_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_5_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_5_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_5_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_5_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_5_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_5_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_5_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_5_io_y_px; // @[package.scala:267:25] wire _entries_barrier_5_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_5_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_5_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_5_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_5_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_5_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_4_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_4_io_y_u; // @[package.scala:267:25] wire _entries_barrier_4_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_4_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_4_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_4_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_4_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_4_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_4_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_4_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_4_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_4_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_4_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_4_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_4_io_y_px; // @[package.scala:267:25] wire _entries_barrier_4_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_4_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_4_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_4_io_y_paa; // @[package.scala:267:25] wire 
_entries_barrier_4_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_4_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_3_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_3_io_y_u; // @[package.scala:267:25] wire _entries_barrier_3_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_3_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_3_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_3_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_3_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_3_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_3_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_3_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_3_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_3_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_3_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_3_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_3_io_y_px; // @[package.scala:267:25] wire _entries_barrier_3_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_3_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_3_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_3_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_3_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_3_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_2_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_2_io_y_u; // @[package.scala:267:25] wire _entries_barrier_2_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_2_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_2_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_2_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_2_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_2_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_2_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_2_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_2_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_2_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_2_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_2_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_2_io_y_px; // @[package.scala:267:25] wire _entries_barrier_2_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_2_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_2_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_2_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_2_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_2_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_1_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_1_io_y_u; // @[package.scala:267:25] wire _entries_barrier_1_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_1_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_1_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_1_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_1_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_1_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_1_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_1_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_1_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_1_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_1_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_1_io_y_pw; // @[package.scala:267:25] 
wire _entries_barrier_1_io_y_px; // @[package.scala:267:25] wire _entries_barrier_1_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_1_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_1_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_1_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_1_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_1_io_y_c; // @[package.scala:267:25] wire [19:0] _entries_barrier_io_y_ppn; // @[package.scala:267:25] wire _entries_barrier_io_y_u; // @[package.scala:267:25] wire _entries_barrier_io_y_ae_ptw; // @[package.scala:267:25] wire _entries_barrier_io_y_ae_final; // @[package.scala:267:25] wire _entries_barrier_io_y_ae_stage2; // @[package.scala:267:25] wire _entries_barrier_io_y_pf; // @[package.scala:267:25] wire _entries_barrier_io_y_gf; // @[package.scala:267:25] wire _entries_barrier_io_y_sw; // @[package.scala:267:25] wire _entries_barrier_io_y_sx; // @[package.scala:267:25] wire _entries_barrier_io_y_sr; // @[package.scala:267:25] wire _entries_barrier_io_y_hw; // @[package.scala:267:25] wire _entries_barrier_io_y_hx; // @[package.scala:267:25] wire _entries_barrier_io_y_hr; // @[package.scala:267:25] wire _entries_barrier_io_y_pw; // @[package.scala:267:25] wire _entries_barrier_io_y_px; // @[package.scala:267:25] wire _entries_barrier_io_y_pr; // @[package.scala:267:25] wire _entries_barrier_io_y_ppp; // @[package.scala:267:25] wire _entries_barrier_io_y_pal; // @[package.scala:267:25] wire _entries_barrier_io_y_paa; // @[package.scala:267:25] wire _entries_barrier_io_y_eff; // @[package.scala:267:25] wire _entries_barrier_io_y_c; // @[package.scala:267:25] wire _pma_io_resp_r; // @[TLB.scala:422:19] wire _pma_io_resp_w; // @[TLB.scala:422:19] wire _pma_io_resp_pp; // @[TLB.scala:422:19] wire _pma_io_resp_al; // @[TLB.scala:422:19] wire _pma_io_resp_aa; // @[TLB.scala:422:19] wire _pma_io_resp_x; // @[TLB.scala:422:19] wire _pma_io_resp_eff; // @[TLB.scala:422:19] wire _pmp_io_r; // @[TLB.scala:416:19] wire _pmp_io_w; // @[TLB.scala:416:19] wire _pmp_io_x; // @[TLB.scala:416:19] wire [19:0] _mpu_ppn_barrier_io_y_ppn; // @[package.scala:267:25] wire io_req_valid_0 = io_req_valid; // @[TLB.scala:318:7] wire [48:0] io_req_bits_vaddr_0 = io_req_bits_vaddr; // @[TLB.scala:318:7] wire io_req_bits_passthrough_0 = io_req_bits_passthrough; // @[TLB.scala:318:7] wire [1:0] io_req_bits_size_0 = io_req_bits_size; // @[TLB.scala:318:7] wire [4:0] io_req_bits_cmd_0 = io_req_bits_cmd; // @[TLB.scala:318:7] wire [1:0] io_req_bits_prv_0 = io_req_bits_prv; // @[TLB.scala:318:7] wire io_req_bits_v_0 = io_req_bits_v; // @[TLB.scala:318:7] wire io_sfence_valid_0 = io_sfence_valid; // @[TLB.scala:318:7] wire io_sfence_bits_rs1_0 = io_sfence_bits_rs1; // @[TLB.scala:318:7] wire io_sfence_bits_rs2_0 = io_sfence_bits_rs2; // @[TLB.scala:318:7] wire [47:0] io_sfence_bits_addr_0 = io_sfence_bits_addr; // @[TLB.scala:318:7] wire io_sfence_bits_asid_0 = io_sfence_bits_asid; // @[TLB.scala:318:7] wire io_sfence_bits_hv_0 = io_sfence_bits_hv; // @[TLB.scala:318:7] wire io_sfence_bits_hg_0 = io_sfence_bits_hg; // @[TLB.scala:318:7] wire io_ptw_req_ready_0 = io_ptw_req_ready; // @[TLB.scala:318:7] wire io_ptw_resp_valid_0 = io_ptw_resp_valid; // @[TLB.scala:318:7] wire io_ptw_resp_bits_ae_ptw_0 = io_ptw_resp_bits_ae_ptw; // @[TLB.scala:318:7] wire io_ptw_resp_bits_ae_final_0 = io_ptw_resp_bits_ae_final; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pf_0 = io_ptw_resp_bits_pf; // @[TLB.scala:318:7] wire io_ptw_resp_bits_gf_0 = 
io_ptw_resp_bits_gf; // @[TLB.scala:318:7] wire io_ptw_resp_bits_hr_0 = io_ptw_resp_bits_hr; // @[TLB.scala:318:7] wire io_ptw_resp_bits_hw_0 = io_ptw_resp_bits_hw; // @[TLB.scala:318:7] wire io_ptw_resp_bits_hx_0 = io_ptw_resp_bits_hx; // @[TLB.scala:318:7] wire [9:0] io_ptw_resp_bits_pte_reserved_for_future_0 = io_ptw_resp_bits_pte_reserved_for_future; // @[TLB.scala:318:7] wire [43:0] io_ptw_resp_bits_pte_ppn_0 = io_ptw_resp_bits_pte_ppn; // @[TLB.scala:318:7] wire [1:0] io_ptw_resp_bits_pte_reserved_for_software_0 = io_ptw_resp_bits_pte_reserved_for_software; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_d_0 = io_ptw_resp_bits_pte_d; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_a_0 = io_ptw_resp_bits_pte_a; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_g_0 = io_ptw_resp_bits_pte_g; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_u_0 = io_ptw_resp_bits_pte_u; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_x_0 = io_ptw_resp_bits_pte_x; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_w_0 = io_ptw_resp_bits_pte_w; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_r_0 = io_ptw_resp_bits_pte_r; // @[TLB.scala:318:7] wire io_ptw_resp_bits_pte_v_0 = io_ptw_resp_bits_pte_v; // @[TLB.scala:318:7] wire [1:0] io_ptw_resp_bits_level_0 = io_ptw_resp_bits_level; // @[TLB.scala:318:7] wire io_ptw_resp_bits_homogeneous_0 = io_ptw_resp_bits_homogeneous; // @[TLB.scala:318:7] wire io_ptw_resp_bits_gpa_valid_0 = io_ptw_resp_bits_gpa_valid; // @[TLB.scala:318:7] wire [47:0] io_ptw_resp_bits_gpa_bits_0 = io_ptw_resp_bits_gpa_bits; // @[TLB.scala:318:7] wire io_ptw_resp_bits_gpa_is_pte_0 = io_ptw_resp_bits_gpa_is_pte; // @[TLB.scala:318:7] wire [3:0] io_ptw_ptbr_mode_0 = io_ptw_ptbr_mode; // @[TLB.scala:318:7] wire [43:0] io_ptw_ptbr_ppn_0 = io_ptw_ptbr_ppn; // @[TLB.scala:318:7] wire io_ptw_status_debug_0 = io_ptw_status_debug; // @[TLB.scala:318:7] wire io_ptw_status_cease_0 = io_ptw_status_cease; // @[TLB.scala:318:7] wire io_ptw_status_wfi_0 = io_ptw_status_wfi; // @[TLB.scala:318:7] wire [31:0] io_ptw_status_isa_0 = io_ptw_status_isa; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_dprv_0 = io_ptw_status_dprv; // @[TLB.scala:318:7] wire io_ptw_status_dv_0 = io_ptw_status_dv; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_prv_0 = io_ptw_status_prv; // @[TLB.scala:318:7] wire io_ptw_status_v_0 = io_ptw_status_v; // @[TLB.scala:318:7] wire io_ptw_status_sd_0 = io_ptw_status_sd; // @[TLB.scala:318:7] wire io_ptw_status_mpv_0 = io_ptw_status_mpv; // @[TLB.scala:318:7] wire io_ptw_status_gva_0 = io_ptw_status_gva; // @[TLB.scala:318:7] wire io_ptw_status_tsr_0 = io_ptw_status_tsr; // @[TLB.scala:318:7] wire io_ptw_status_tw_0 = io_ptw_status_tw; // @[TLB.scala:318:7] wire io_ptw_status_tvm_0 = io_ptw_status_tvm; // @[TLB.scala:318:7] wire io_ptw_status_mxr_0 = io_ptw_status_mxr; // @[TLB.scala:318:7] wire io_ptw_status_sum_0 = io_ptw_status_sum; // @[TLB.scala:318:7] wire io_ptw_status_mprv_0 = io_ptw_status_mprv; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_fs_0 = io_ptw_status_fs; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_mpp_0 = io_ptw_status_mpp; // @[TLB.scala:318:7] wire io_ptw_status_spp_0 = io_ptw_status_spp; // @[TLB.scala:318:7] wire io_ptw_status_mpie_0 = io_ptw_status_mpie; // @[TLB.scala:318:7] wire io_ptw_status_spie_0 = io_ptw_status_spie; // @[TLB.scala:318:7] wire io_ptw_status_mie_0 = io_ptw_status_mie; // @[TLB.scala:318:7] wire io_ptw_status_sie_0 = io_ptw_status_sie; // @[TLB.scala:318:7] wire io_ptw_hstatus_spvp_0 = io_ptw_hstatus_spvp; // @[TLB.scala:318:7] wire 
io_ptw_hstatus_spv_0 = io_ptw_hstatus_spv; // @[TLB.scala:318:7] wire io_ptw_hstatus_gva_0 = io_ptw_hstatus_gva; // @[TLB.scala:318:7] wire io_ptw_gstatus_debug_0 = io_ptw_gstatus_debug; // @[TLB.scala:318:7] wire io_ptw_gstatus_cease_0 = io_ptw_gstatus_cease; // @[TLB.scala:318:7] wire io_ptw_gstatus_wfi_0 = io_ptw_gstatus_wfi; // @[TLB.scala:318:7] wire [31:0] io_ptw_gstatus_isa_0 = io_ptw_gstatus_isa; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_dprv_0 = io_ptw_gstatus_dprv; // @[TLB.scala:318:7] wire io_ptw_gstatus_dv_0 = io_ptw_gstatus_dv; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_prv_0 = io_ptw_gstatus_prv; // @[TLB.scala:318:7] wire io_ptw_gstatus_v_0 = io_ptw_gstatus_v; // @[TLB.scala:318:7] wire io_ptw_gstatus_sd_0 = io_ptw_gstatus_sd; // @[TLB.scala:318:7] wire [22:0] io_ptw_gstatus_zero2_0 = io_ptw_gstatus_zero2; // @[TLB.scala:318:7] wire io_ptw_gstatus_mpv_0 = io_ptw_gstatus_mpv; // @[TLB.scala:318:7] wire io_ptw_gstatus_gva_0 = io_ptw_gstatus_gva; // @[TLB.scala:318:7] wire io_ptw_gstatus_mbe_0 = io_ptw_gstatus_mbe; // @[TLB.scala:318:7] wire io_ptw_gstatus_sbe_0 = io_ptw_gstatus_sbe; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_sxl_0 = io_ptw_gstatus_sxl; // @[TLB.scala:318:7] wire [7:0] io_ptw_gstatus_zero1_0 = io_ptw_gstatus_zero1; // @[TLB.scala:318:7] wire io_ptw_gstatus_tsr_0 = io_ptw_gstatus_tsr; // @[TLB.scala:318:7] wire io_ptw_gstatus_tw_0 = io_ptw_gstatus_tw; // @[TLB.scala:318:7] wire io_ptw_gstatus_tvm_0 = io_ptw_gstatus_tvm; // @[TLB.scala:318:7] wire io_ptw_gstatus_mxr_0 = io_ptw_gstatus_mxr; // @[TLB.scala:318:7] wire io_ptw_gstatus_sum_0 = io_ptw_gstatus_sum; // @[TLB.scala:318:7] wire io_ptw_gstatus_mprv_0 = io_ptw_gstatus_mprv; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_fs_0 = io_ptw_gstatus_fs; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_mpp_0 = io_ptw_gstatus_mpp; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_vs_0 = io_ptw_gstatus_vs; // @[TLB.scala:318:7] wire io_ptw_gstatus_spp_0 = io_ptw_gstatus_spp; // @[TLB.scala:318:7] wire io_ptw_gstatus_mpie_0 = io_ptw_gstatus_mpie; // @[TLB.scala:318:7] wire io_ptw_gstatus_ube_0 = io_ptw_gstatus_ube; // @[TLB.scala:318:7] wire io_ptw_gstatus_spie_0 = io_ptw_gstatus_spie; // @[TLB.scala:318:7] wire io_ptw_gstatus_upie_0 = io_ptw_gstatus_upie; // @[TLB.scala:318:7] wire io_ptw_gstatus_mie_0 = io_ptw_gstatus_mie; // @[TLB.scala:318:7] wire io_ptw_gstatus_hie_0 = io_ptw_gstatus_hie; // @[TLB.scala:318:7] wire io_ptw_gstatus_sie_0 = io_ptw_gstatus_sie; // @[TLB.scala:318:7] wire io_ptw_gstatus_uie_0 = io_ptw_gstatus_uie; // @[TLB.scala:318:7] wire io_ptw_pmp_0_cfg_l_0 = io_ptw_pmp_0_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_0_cfg_a_0 = io_ptw_pmp_0_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_0_cfg_x_0 = io_ptw_pmp_0_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_0_cfg_w_0 = io_ptw_pmp_0_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_0_cfg_r_0 = io_ptw_pmp_0_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_0_addr_0 = io_ptw_pmp_0_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_0_mask_0 = io_ptw_pmp_0_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_1_cfg_l_0 = io_ptw_pmp_1_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_1_cfg_a_0 = io_ptw_pmp_1_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_1_cfg_x_0 = io_ptw_pmp_1_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_1_cfg_w_0 = io_ptw_pmp_1_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_1_cfg_r_0 = io_ptw_pmp_1_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_1_addr_0 = io_ptw_pmp_1_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_1_mask_0 = 
io_ptw_pmp_1_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_2_cfg_l_0 = io_ptw_pmp_2_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_2_cfg_a_0 = io_ptw_pmp_2_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_2_cfg_x_0 = io_ptw_pmp_2_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_2_cfg_w_0 = io_ptw_pmp_2_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_2_cfg_r_0 = io_ptw_pmp_2_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_2_addr_0 = io_ptw_pmp_2_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_2_mask_0 = io_ptw_pmp_2_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_3_cfg_l_0 = io_ptw_pmp_3_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_3_cfg_a_0 = io_ptw_pmp_3_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_3_cfg_x_0 = io_ptw_pmp_3_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_3_cfg_w_0 = io_ptw_pmp_3_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_3_cfg_r_0 = io_ptw_pmp_3_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_3_addr_0 = io_ptw_pmp_3_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_3_mask_0 = io_ptw_pmp_3_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_4_cfg_l_0 = io_ptw_pmp_4_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_4_cfg_a_0 = io_ptw_pmp_4_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_4_cfg_x_0 = io_ptw_pmp_4_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_4_cfg_w_0 = io_ptw_pmp_4_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_4_cfg_r_0 = io_ptw_pmp_4_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_4_addr_0 = io_ptw_pmp_4_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_4_mask_0 = io_ptw_pmp_4_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_5_cfg_l_0 = io_ptw_pmp_5_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_5_cfg_a_0 = io_ptw_pmp_5_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_5_cfg_x_0 = io_ptw_pmp_5_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_5_cfg_w_0 = io_ptw_pmp_5_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_5_cfg_r_0 = io_ptw_pmp_5_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_5_addr_0 = io_ptw_pmp_5_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_5_mask_0 = io_ptw_pmp_5_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_6_cfg_l_0 = io_ptw_pmp_6_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_6_cfg_a_0 = io_ptw_pmp_6_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_6_cfg_x_0 = io_ptw_pmp_6_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_6_cfg_w_0 = io_ptw_pmp_6_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_6_cfg_r_0 = io_ptw_pmp_6_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_6_addr_0 = io_ptw_pmp_6_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_6_mask_0 = io_ptw_pmp_6_mask; // @[TLB.scala:318:7] wire io_ptw_pmp_7_cfg_l_0 = io_ptw_pmp_7_cfg_l; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_7_cfg_a_0 = io_ptw_pmp_7_cfg_a; // @[TLB.scala:318:7] wire io_ptw_pmp_7_cfg_x_0 = io_ptw_pmp_7_cfg_x; // @[TLB.scala:318:7] wire io_ptw_pmp_7_cfg_w_0 = io_ptw_pmp_7_cfg_w; // @[TLB.scala:318:7] wire io_ptw_pmp_7_cfg_r_0 = io_ptw_pmp_7_cfg_r; // @[TLB.scala:318:7] wire [29:0] io_ptw_pmp_7_addr_0 = io_ptw_pmp_7_addr; // @[TLB.scala:318:7] wire [31:0] io_ptw_pmp_7_mask_0 = io_ptw_pmp_7_mask; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_0_ren_0 = io_ptw_customCSRs_csrs_0_ren; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_0_wen_0 = io_ptw_customCSRs_csrs_0_wen; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_0_wdata_0 = io_ptw_customCSRs_csrs_0_wdata; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_0_value_0 = io_ptw_customCSRs_csrs_0_value; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_1_ren_0 = io_ptw_customCSRs_csrs_1_ren; // @[TLB.scala:318:7] wire 
io_ptw_customCSRs_csrs_1_wen_0 = io_ptw_customCSRs_csrs_1_wen; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_1_wdata_0 = io_ptw_customCSRs_csrs_1_wdata; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_1_value_0 = io_ptw_customCSRs_csrs_1_value; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_2_ren_0 = io_ptw_customCSRs_csrs_2_ren; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_2_wen_0 = io_ptw_customCSRs_csrs_2_wen; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_2_wdata_0 = io_ptw_customCSRs_csrs_2_wdata; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_2_value_0 = io_ptw_customCSRs_csrs_2_value; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_3_ren_0 = io_ptw_customCSRs_csrs_3_ren; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_3_wen_0 = io_ptw_customCSRs_csrs_3_wen; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_3_wdata_0 = io_ptw_customCSRs_csrs_3_wdata; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_3_value_0 = io_ptw_customCSRs_csrs_3_value; // @[TLB.scala:318:7] wire io_resp_gpa_is_pte = 1'h0; // @[TLB.scala:318:7] wire io_resp_gf_ld = 1'h0; // @[TLB.scala:318:7] wire io_resp_gf_st = 1'h0; // @[TLB.scala:318:7] wire io_resp_gf_inst = 1'h0; // @[TLB.scala:318:7] wire io_resp_ma_inst = 1'h0; // @[TLB.scala:318:7] wire io_ptw_req_bits_bits_vstage1 = 1'h0; // @[TLB.scala:318:7] wire io_ptw_req_bits_bits_stage2 = 1'h0; // @[TLB.scala:318:7] wire io_ptw_resp_bits_fragmented_superpage = 1'h0; // @[TLB.scala:318:7] wire io_ptw_status_mbe = 1'h0; // @[TLB.scala:318:7] wire io_ptw_status_sbe = 1'h0; // @[TLB.scala:318:7] wire io_ptw_status_sd_rv32 = 1'h0; // @[TLB.scala:318:7] wire io_ptw_status_ube = 1'h0; // @[TLB.scala:318:7] wire io_ptw_status_upie = 1'h0; // @[TLB.scala:318:7] wire io_ptw_status_hie = 1'h0; // @[TLB.scala:318:7] wire io_ptw_status_uie = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_vtsr = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_vtw = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_vtvm = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_hu = 1'h0; // @[TLB.scala:318:7] wire io_ptw_hstatus_vsbe = 1'h0; // @[TLB.scala:318:7] wire io_ptw_gstatus_sd_rv32 = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_0_stall = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_0_set = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_1_stall = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_1_set = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_2_stall = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_2_set = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_3_stall = 1'h0; // @[TLB.scala:318:7] wire io_ptw_customCSRs_csrs_3_set = 1'h0; // @[TLB.scala:318:7] wire io_kill = 1'h0; // @[TLB.scala:318:7] wire priv_v = 1'h0; // @[TLB.scala:369:34] wire _vstage1_en_T = 1'h0; // @[TLB.scala:376:38] wire _vstage1_en_T_1 = 1'h0; // @[TLB.scala:376:68] wire vstage1_en = 1'h0; // @[TLB.scala:376:48] wire _stage2_en_T = 1'h0; // @[TLB.scala:378:38] wire _stage2_en_T_1 = 1'h0; // @[TLB.scala:378:68] wire stage2_en = 1'h0; // @[TLB.scala:378:48] wire _vsatp_mode_mismatch_T = 1'h0; // @[TLB.scala:403:52] wire _vsatp_mode_mismatch_T_1 = 1'h0; // @[TLB.scala:403:37] wire vsatp_mode_mismatch = 1'h0; // @[TLB.scala:403:78] wire mpu_ppn_res = 1'h0; // @[TLB.scala:195:26] wire _superpage_hits_ignore_T = 1'h0; // @[TLB.scala:182:28] wire superpage_hits_ignore = 1'h0; // @[TLB.scala:182:34] wire _superpage_hits_ignore_T_4 = 1'h0; // @[TLB.scala:182:28] wire superpage_hits_ignore_4 = 1'h0; 
// @[TLB.scala:182:34] wire _superpage_hits_ignore_T_8 = 1'h0; // @[TLB.scala:182:28] wire superpage_hits_ignore_8 = 1'h0; // @[TLB.scala:182:34] wire _superpage_hits_ignore_T_12 = 1'h0; // @[TLB.scala:182:28] wire superpage_hits_ignore_12 = 1'h0; // @[TLB.scala:182:34] wire _hitsVec_ignore_T = 1'h0; // @[TLB.scala:182:28] wire hitsVec_ignore = 1'h0; // @[TLB.scala:182:34] wire _hitsVec_ignore_T_4 = 1'h0; // @[TLB.scala:182:28] wire hitsVec_ignore_4 = 1'h0; // @[TLB.scala:182:34] wire _hitsVec_ignore_T_8 = 1'h0; // @[TLB.scala:182:28] wire hitsVec_ignore_8 = 1'h0; // @[TLB.scala:182:34] wire _hitsVec_ignore_T_12 = 1'h0; // @[TLB.scala:182:28] wire hitsVec_ignore_12 = 1'h0; // @[TLB.scala:182:34] wire _hitsVec_ignore_T_16 = 1'h0; // @[TLB.scala:182:28] wire hitsVec_ignore_16 = 1'h0; // @[TLB.scala:182:34] wire refill_v = 1'h0; // @[TLB.scala:448:33] wire newEntry_ae_stage2 = 1'h0; // @[TLB.scala:449:24] wire newEntry_fragmented_superpage = 1'h0; // @[TLB.scala:449:24] wire _newEntry_ae_stage2_T_1 = 1'h0; // @[TLB.scala:456:84] wire _waddr_T = 1'h0; // @[TLB.scala:477:45] wire ppn_res = 1'h0; // @[TLB.scala:195:26] wire ppn_res_1 = 1'h0; // @[TLB.scala:195:26] wire ppn_res_2 = 1'h0; // @[TLB.scala:195:26] wire ppn_res_3 = 1'h0; // @[TLB.scala:195:26] wire ppn_res_4 = 1'h0; // @[TLB.scala:195:26] wire _mxr_T = 1'h0; // @[TLB.scala:518:36] wire cmd_readx = 1'h0; // @[TLB.scala:575:37] wire _gf_ld_array_T = 1'h0; // @[TLB.scala:600:32] wire _gf_st_array_T = 1'h0; // @[TLB.scala:601:32] wire _multipleHits_T_6 = 1'h0; // @[Misc.scala:183:37] wire _multipleHits_T_15 = 1'h0; // @[Misc.scala:183:37] wire _multipleHits_T_27 = 1'h0; // @[Misc.scala:183:37] wire _multipleHits_T_35 = 1'h0; // @[Misc.scala:183:37] wire _multipleHits_T_40 = 1'h0; // @[Misc.scala:183:37] wire _io_resp_gf_ld_T = 1'h0; // @[TLB.scala:637:29] wire _io_resp_gf_ld_T_2 = 1'h0; // @[TLB.scala:637:66] wire _io_resp_gf_ld_T_3 = 1'h0; // @[TLB.scala:637:42] wire _io_resp_gf_st_T = 1'h0; // @[TLB.scala:638:29] wire _io_resp_gf_st_T_2 = 1'h0; // @[TLB.scala:638:73] wire _io_resp_gf_st_T_3 = 1'h0; // @[TLB.scala:638:49] wire _io_resp_gf_inst_T_1 = 1'h0; // @[TLB.scala:639:56] wire _io_resp_gf_inst_T_2 = 1'h0; // @[TLB.scala:639:30] wire _io_resp_gpa_is_pte_T = 1'h0; // @[TLB.scala:655:36] wire hv = 1'h0; // @[TLB.scala:721:36] wire hg = 1'h0; // @[TLB.scala:722:36] wire hv_1 = 1'h0; // @[TLB.scala:721:36] wire hg_1 = 1'h0; // @[TLB.scala:722:36] wire hv_2 = 1'h0; // @[TLB.scala:721:36] wire hg_2 = 1'h0; // @[TLB.scala:722:36] wire hv_3 = 1'h0; // @[TLB.scala:721:36] wire hg_3 = 1'h0; // @[TLB.scala:722:36] wire hv_4 = 1'h0; // @[TLB.scala:721:36] wire hg_4 = 1'h0; // @[TLB.scala:722:36] wire hv_5 = 1'h0; // @[TLB.scala:721:36] wire hg_5 = 1'h0; // @[TLB.scala:722:36] wire hv_6 = 1'h0; // @[TLB.scala:721:36] wire hg_6 = 1'h0; // @[TLB.scala:722:36] wire hv_7 = 1'h0; // @[TLB.scala:721:36] wire hg_7 = 1'h0; // @[TLB.scala:722:36] wire hv_8 = 1'h0; // @[TLB.scala:721:36] wire hg_8 = 1'h0; // @[TLB.scala:722:36] wire _ignore_T = 1'h0; // @[TLB.scala:182:28] wire ignore = 1'h0; // @[TLB.scala:182:34] wire hv_9 = 1'h0; // @[TLB.scala:721:36] wire hg_9 = 1'h0; // @[TLB.scala:722:36] wire _ignore_T_4 = 1'h0; // @[TLB.scala:182:28] wire ignore_4 = 1'h0; // @[TLB.scala:182:34] wire hv_10 = 1'h0; // @[TLB.scala:721:36] wire hg_10 = 1'h0; // @[TLB.scala:722:36] wire _ignore_T_8 = 1'h0; // @[TLB.scala:182:28] wire ignore_8 = 1'h0; // @[TLB.scala:182:34] wire hv_11 = 1'h0; // @[TLB.scala:721:36] wire hg_11 = 1'h0; // @[TLB.scala:722:36] wire 
_ignore_T_12 = 1'h0; // @[TLB.scala:182:28] wire ignore_12 = 1'h0; // @[TLB.scala:182:34] wire hv_12 = 1'h0; // @[TLB.scala:721:36] wire hg_12 = 1'h0; // @[TLB.scala:722:36] wire _ignore_T_16 = 1'h0; // @[TLB.scala:182:28] wire ignore_16 = 1'h0; // @[TLB.scala:182:34] wire [15:0] io_ptw_ptbr_asid = 16'h0; // @[TLB.scala:318:7] wire [15:0] io_ptw_hgatp_asid = 16'h0; // @[TLB.scala:318:7] wire [15:0] io_ptw_vsatp_asid = 16'h0; // @[TLB.scala:318:7] wire [15:0] satp_asid = 16'h0; // @[TLB.scala:373:17] wire [3:0] io_ptw_hgatp_mode = 4'h0; // @[TLB.scala:318:7] wire [3:0] io_ptw_vsatp_mode = 4'h0; // @[TLB.scala:318:7] wire [43:0] io_ptw_hgatp_ppn = 44'h0; // @[TLB.scala:318:7] wire [43:0] io_ptw_vsatp_ppn = 44'h0; // @[TLB.scala:318:7] wire [22:0] io_ptw_status_zero2 = 23'h0; // @[TLB.scala:318:7] wire [7:0] io_ptw_status_zero1 = 8'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_xs = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_vs = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_hstatus_zero3 = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_hstatus_zero2 = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_xs = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_0_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_1_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_2_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_3_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_4_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_5_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_6_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [1:0] io_ptw_pmp_7_cfg_res = 2'h0; // @[TLB.scala:318:7] wire [29:0] io_ptw_hstatus_zero6 = 30'h0; // @[TLB.scala:318:7] wire [8:0] io_ptw_hstatus_zero5 = 9'h0; // @[TLB.scala:318:7] wire [5:0] io_ptw_hstatus_vgein = 6'h0; // @[TLB.scala:318:7] wire [4:0] io_ptw_hstatus_zero1 = 5'h0; // @[TLB.scala:318:7] wire io_ptw_req_bits_valid = 1'h1; // @[TLB.scala:318:7] wire _homogeneous_T_59 = 1'h1; // @[TLBPermissions.scala:87:22] wire superpage_hits_ignore_3 = 1'h1; // @[TLB.scala:182:34] wire _superpage_hits_T_18 = 1'h1; // @[TLB.scala:183:40] wire superpage_hits_ignore_7 = 1'h1; // @[TLB.scala:182:34] wire _superpage_hits_T_37 = 1'h1; // @[TLB.scala:183:40] wire superpage_hits_ignore_11 = 1'h1; // @[TLB.scala:182:34] wire _superpage_hits_T_56 = 1'h1; // @[TLB.scala:183:40] wire superpage_hits_ignore_15 = 1'h1; // @[TLB.scala:182:34] wire _superpage_hits_T_75 = 1'h1; // @[TLB.scala:183:40] wire hitsVec_ignore_3 = 1'h1; // @[TLB.scala:182:34] wire _hitsVec_T_66 = 1'h1; // @[TLB.scala:183:40] wire hitsVec_ignore_7 = 1'h1; // @[TLB.scala:182:34] wire _hitsVec_T_86 = 1'h1; // @[TLB.scala:183:40] wire hitsVec_ignore_11 = 1'h1; // @[TLB.scala:182:34] wire _hitsVec_T_106 = 1'h1; // @[TLB.scala:183:40] wire hitsVec_ignore_15 = 1'h1; // @[TLB.scala:182:34] wire _hitsVec_T_126 = 1'h1; // @[TLB.scala:183:40] wire ppn_ignore_2 = 1'h1; // @[TLB.scala:197:34] wire ppn_ignore_5 = 1'h1; // @[TLB.scala:197:34] wire ppn_ignore_8 = 1'h1; // @[TLB.scala:197:34] wire ppn_ignore_11 = 1'h1; // @[TLB.scala:197:34] wire _stage2_bypass_T = 1'h1; // @[TLB.scala:523:42] wire _gpa_hits_hit_mask_T_3 = 1'h1; // @[TLB.scala:606:107] wire _tlb_miss_T = 1'h1; // @[TLB.scala:613:32] wire _io_resp_gpa_page_T = 1'h1; // @[TLB.scala:657:20] wire _io_ptw_req_bits_valid_T = 1'h1; // @[TLB.scala:663:28] wire ignore_3 = 1'h1; // @[TLB.scala:182:34] wire ignore_7 = 1'h1; // @[TLB.scala:182:34] wire ignore_11 = 1'h1; // @[TLB.scala:182:34] wire ignore_15 = 1'h1; 
// @[TLB.scala:182:34] wire [1:0] io_ptw_status_sxl = 2'h2; // @[TLB.scala:318:7] wire [1:0] io_ptw_status_uxl = 2'h2; // @[TLB.scala:318:7] wire [1:0] io_ptw_hstatus_vsxl = 2'h2; // @[TLB.scala:318:7] wire [1:0] io_ptw_gstatus_uxl = 2'h2; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_0_sdata = 64'h0; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_1_sdata = 64'h0; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_2_sdata = 64'h0; // @[TLB.scala:318:7] wire [63:0] io_ptw_customCSRs_csrs_3_sdata = 64'h0; // @[TLB.scala:318:7] wire [13:0] _gf_ld_array_T_2 = 14'h0; // @[TLB.scala:600:46] wire [13:0] gf_ld_array = 14'h0; // @[TLB.scala:600:24] wire [13:0] _gf_st_array_T_1 = 14'h0; // @[TLB.scala:601:53] wire [13:0] gf_st_array = 14'h0; // @[TLB.scala:601:24] wire [13:0] _gf_inst_array_T = 14'h0; // @[TLB.scala:602:36] wire [13:0] gf_inst_array = 14'h0; // @[TLB.scala:602:26] wire [13:0] gpa_hits_need_gpa_mask = 14'h0; // @[TLB.scala:605:73] wire [13:0] _io_resp_gf_ld_T_1 = 14'h0; // @[TLB.scala:637:58] wire [13:0] _io_resp_gf_st_T_1 = 14'h0; // @[TLB.scala:638:65] wire [13:0] _io_resp_gf_inst_T = 14'h0; // @[TLB.scala:639:48] wire [6:0] _state_vec_WIRE_0 = 7'h0; // @[Replacement.scala:305:25] wire [12:0] stage2_bypass = 13'h1FFF; // @[TLB.scala:523:27] wire [12:0] _hr_array_T_4 = 13'h1FFF; // @[TLB.scala:524:111] wire [12:0] _hw_array_T_1 = 13'h1FFF; // @[TLB.scala:525:55] wire [12:0] _hx_array_T_1 = 13'h1FFF; // @[TLB.scala:526:55] wire [12:0] _gpa_hits_hit_mask_T_4 = 13'h1FFF; // @[TLB.scala:606:88] wire [12:0] gpa_hits_hit_mask = 13'h1FFF; // @[TLB.scala:606:82] wire [12:0] _gpa_hits_T_1 = 13'h1FFF; // @[TLB.scala:607:16] wire [12:0] gpa_hits = 13'h1FFF; // @[TLB.scala:607:14] wire [12:0] _stage1_bypass_T = 13'h0; // @[TLB.scala:517:27] wire [12:0] stage1_bypass = 13'h0; // @[TLB.scala:517:61] wire [12:0] _gpa_hits_T = 13'h0; // @[TLB.scala:607:30] wire [13:0] hr_array = 14'h3FFF; // @[TLB.scala:524:21] wire [13:0] hw_array = 14'h3FFF; // @[TLB.scala:525:21] wire [13:0] hx_array = 14'h3FFF; // @[TLB.scala:526:21] wire [13:0] _must_alloc_array_T_8 = 14'h3FFF; // @[TLB.scala:596:19] wire [13:0] _gf_ld_array_T_1 = 14'h3FFF; // @[TLB.scala:600:50] wire _io_req_ready_T; // @[TLB.scala:631:25] wire [1:0] io_resp_size_0 = io_req_bits_size_0; // @[TLB.scala:318:7] wire [4:0] io_resp_cmd_0 = io_req_bits_cmd_0; // @[TLB.scala:318:7] wire _io_resp_miss_T_2; // @[TLB.scala:651:64] wire [48:0] _io_resp_gpa_T; // @[TLB.scala:659:8] wire _io_resp_pf_ld_T_3; // @[TLB.scala:633:41] wire _io_resp_pf_st_T_3; // @[TLB.scala:634:48] wire _io_resp_pf_inst_T_2; // @[TLB.scala:635:29] wire _io_resp_ae_ld_T_1; // @[TLB.scala:641:41] wire _io_resp_ae_st_T_1; // @[TLB.scala:642:41] wire _io_resp_ae_inst_T_2; // @[TLB.scala:643:41] wire _io_resp_ma_ld_T; // @[TLB.scala:645:31] wire _io_resp_ma_st_T; // @[TLB.scala:646:31] wire _io_resp_cacheable_T_1; // @[TLB.scala:648:41] wire _io_resp_must_alloc_T_1; // @[TLB.scala:649:51] wire _io_resp_prefetchable_T_2; // @[TLB.scala:650:59] wire _io_ptw_req_valid_T; // @[TLB.scala:662:29] wire do_refill = io_ptw_resp_valid_0; // @[TLB.scala:318:7, :408:29] wire newEntry_ae_ptw = io_ptw_resp_bits_ae_ptw_0; // @[TLB.scala:318:7, :449:24] wire newEntry_ae_final = io_ptw_resp_bits_ae_final_0; // @[TLB.scala:318:7, :449:24] wire newEntry_pf = io_ptw_resp_bits_pf_0; // @[TLB.scala:318:7, :449:24] wire newEntry_gf = io_ptw_resp_bits_gf_0; // @[TLB.scala:318:7, :449:24] wire newEntry_hr = io_ptw_resp_bits_hr_0; // @[TLB.scala:318:7, :449:24] wire newEntry_hw = 
io_ptw_resp_bits_hw_0; // @[TLB.scala:318:7, :449:24] wire newEntry_hx = io_ptw_resp_bits_hx_0; // @[TLB.scala:318:7, :449:24] wire newEntry_u = io_ptw_resp_bits_pte_u_0; // @[TLB.scala:318:7, :449:24] wire [1:0] _special_entry_level_T = io_ptw_resp_bits_level_0; // @[package.scala:163:13] wire [1:0] _superpage_entries_0_level_T = io_ptw_resp_bits_level_0; // @[package.scala:163:13] wire [1:0] _superpage_entries_1_level_T = io_ptw_resp_bits_level_0; // @[package.scala:163:13] wire [1:0] _superpage_entries_2_level_T = io_ptw_resp_bits_level_0; // @[package.scala:163:13] wire [1:0] _superpage_entries_3_level_T = io_ptw_resp_bits_level_0; // @[package.scala:163:13] wire [3:0] satp_mode = io_ptw_ptbr_mode_0; // @[TLB.scala:318:7, :373:17] wire [43:0] satp_ppn = io_ptw_ptbr_ppn_0; // @[TLB.scala:318:7, :373:17] wire mxr = io_ptw_status_mxr_0; // @[TLB.scala:318:7, :518:31] wire sum = io_ptw_status_sum_0; // @[TLB.scala:318:7, :510:16] wire io_req_ready_0; // @[TLB.scala:318:7] wire io_resp_pf_ld_0; // @[TLB.scala:318:7] wire io_resp_pf_st_0; // @[TLB.scala:318:7] wire io_resp_pf_inst_0; // @[TLB.scala:318:7] wire io_resp_ae_ld_0; // @[TLB.scala:318:7] wire io_resp_ae_st_0; // @[TLB.scala:318:7] wire io_resp_ae_inst_0; // @[TLB.scala:318:7] wire io_resp_ma_ld_0; // @[TLB.scala:318:7] wire io_resp_ma_st_0; // @[TLB.scala:318:7] wire io_resp_miss_0; // @[TLB.scala:318:7] wire [31:0] io_resp_paddr_0; // @[TLB.scala:318:7] wire [48:0] io_resp_gpa_0; // @[TLB.scala:318:7] wire io_resp_cacheable_0; // @[TLB.scala:318:7] wire io_resp_must_alloc_0; // @[TLB.scala:318:7] wire io_resp_prefetchable_0; // @[TLB.scala:318:7] wire [35:0] io_ptw_req_bits_bits_addr_0; // @[TLB.scala:318:7] wire io_ptw_req_bits_bits_need_gpa_0; // @[TLB.scala:318:7] wire io_ptw_req_valid_0; // @[TLB.scala:318:7] wire [35:0] vpn = io_req_bits_vaddr_0[47:12]; // @[TLB.scala:318:7, :335:30] wire [35:0] _ppn_T_9 = vpn; // @[TLB.scala:198:28, :335:30] wire [35:0] _ppn_T_21 = vpn; // @[TLB.scala:198:28, :335:30] wire [35:0] _ppn_T_33 = vpn; // @[TLB.scala:198:28, :335:30] wire [35:0] _ppn_T_45 = vpn; // @[TLB.scala:198:28, :335:30] reg [1:0] sectored_entries_0_0_level; // @[TLB.scala:339:29] reg [35:0] sectored_entries_0_0_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_0_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_0_data_0; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_0_data_1; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_0_data_2; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_0_data_3; // @[TLB.scala:339:29] reg sectored_entries_0_0_valid_0; // @[TLB.scala:339:29] reg sectored_entries_0_0_valid_1; // @[TLB.scala:339:29] reg sectored_entries_0_0_valid_2; // @[TLB.scala:339:29] reg sectored_entries_0_0_valid_3; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_1_level; // @[TLB.scala:339:29] reg [35:0] sectored_entries_0_1_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_1_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_1_data_0; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_1_data_1; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_1_data_2; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_1_data_3; // @[TLB.scala:339:29] reg sectored_entries_0_1_valid_0; // @[TLB.scala:339:29] reg sectored_entries_0_1_valid_1; // @[TLB.scala:339:29] reg sectored_entries_0_1_valid_2; // @[TLB.scala:339:29] reg sectored_entries_0_1_valid_3; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_2_level; // @[TLB.scala:339:29] reg [35:0] sectored_entries_0_2_tag_vpn; // 
@[TLB.scala:339:29] reg sectored_entries_0_2_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_2_data_0; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_2_data_1; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_2_data_2; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_2_data_3; // @[TLB.scala:339:29] reg sectored_entries_0_2_valid_0; // @[TLB.scala:339:29] reg sectored_entries_0_2_valid_1; // @[TLB.scala:339:29] reg sectored_entries_0_2_valid_2; // @[TLB.scala:339:29] reg sectored_entries_0_2_valid_3; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_3_level; // @[TLB.scala:339:29] reg [35:0] sectored_entries_0_3_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_3_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_3_data_0; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_3_data_1; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_3_data_2; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_3_data_3; // @[TLB.scala:339:29] reg sectored_entries_0_3_valid_0; // @[TLB.scala:339:29] reg sectored_entries_0_3_valid_1; // @[TLB.scala:339:29] reg sectored_entries_0_3_valid_2; // @[TLB.scala:339:29] reg sectored_entries_0_3_valid_3; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_4_level; // @[TLB.scala:339:29] reg [35:0] sectored_entries_0_4_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_4_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_4_data_0; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_4_data_1; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_4_data_2; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_4_data_3; // @[TLB.scala:339:29] reg sectored_entries_0_4_valid_0; // @[TLB.scala:339:29] reg sectored_entries_0_4_valid_1; // @[TLB.scala:339:29] reg sectored_entries_0_4_valid_2; // @[TLB.scala:339:29] reg sectored_entries_0_4_valid_3; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_5_level; // @[TLB.scala:339:29] reg [35:0] sectored_entries_0_5_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_5_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_5_data_0; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_5_data_1; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_5_data_2; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_5_data_3; // @[TLB.scala:339:29] reg sectored_entries_0_5_valid_0; // @[TLB.scala:339:29] reg sectored_entries_0_5_valid_1; // @[TLB.scala:339:29] reg sectored_entries_0_5_valid_2; // @[TLB.scala:339:29] reg sectored_entries_0_5_valid_3; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_6_level; // @[TLB.scala:339:29] reg [35:0] sectored_entries_0_6_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_6_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_6_data_0; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_6_data_1; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_6_data_2; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_6_data_3; // @[TLB.scala:339:29] reg sectored_entries_0_6_valid_0; // @[TLB.scala:339:29] reg sectored_entries_0_6_valid_1; // @[TLB.scala:339:29] reg sectored_entries_0_6_valid_2; // @[TLB.scala:339:29] reg sectored_entries_0_6_valid_3; // @[TLB.scala:339:29] reg [1:0] sectored_entries_0_7_level; // @[TLB.scala:339:29] reg [35:0] sectored_entries_0_7_tag_vpn; // @[TLB.scala:339:29] reg sectored_entries_0_7_tag_v; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_7_data_0; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_7_data_1; // @[TLB.scala:339:29] reg [41:0] sectored_entries_0_7_data_2; // 
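// TLB storage: eight sectored entries (TLB.scala:339), each holding a shared 36-bit VPN tag, a
// tag_v bit, four packed 42-bit entry-data words, and four per-sub-entry valid bits; four
// superpage entries (TLB.scala:341) and a single special entry (TLB.scala:346) follow, each with
// one data word and one valid bit.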
@[TLB.scala:339:29] reg [41:0] sectored_entries_0_7_data_3; // @[TLB.scala:339:29] reg sectored_entries_0_7_valid_0; // @[TLB.scala:339:29] reg sectored_entries_0_7_valid_1; // @[TLB.scala:339:29] reg sectored_entries_0_7_valid_2; // @[TLB.scala:339:29] reg sectored_entries_0_7_valid_3; // @[TLB.scala:339:29] reg [1:0] superpage_entries_0_level; // @[TLB.scala:341:30] reg [35:0] superpage_entries_0_tag_vpn; // @[TLB.scala:341:30] reg superpage_entries_0_tag_v; // @[TLB.scala:341:30] reg [41:0] superpage_entries_0_data_0; // @[TLB.scala:341:30] wire [41:0] _entries_WIRE_17 = superpage_entries_0_data_0; // @[TLB.scala:170:77, :341:30] reg superpage_entries_0_valid_0; // @[TLB.scala:341:30] reg [1:0] superpage_entries_1_level; // @[TLB.scala:341:30] reg [35:0] superpage_entries_1_tag_vpn; // @[TLB.scala:341:30] reg superpage_entries_1_tag_v; // @[TLB.scala:341:30] reg [41:0] superpage_entries_1_data_0; // @[TLB.scala:341:30] wire [41:0] _entries_WIRE_19 = superpage_entries_1_data_0; // @[TLB.scala:170:77, :341:30] reg superpage_entries_1_valid_0; // @[TLB.scala:341:30] reg [1:0] superpage_entries_2_level; // @[TLB.scala:341:30] reg [35:0] superpage_entries_2_tag_vpn; // @[TLB.scala:341:30] reg superpage_entries_2_tag_v; // @[TLB.scala:341:30] reg [41:0] superpage_entries_2_data_0; // @[TLB.scala:341:30] wire [41:0] _entries_WIRE_21 = superpage_entries_2_data_0; // @[TLB.scala:170:77, :341:30] reg superpage_entries_2_valid_0; // @[TLB.scala:341:30] reg [1:0] superpage_entries_3_level; // @[TLB.scala:341:30] reg [35:0] superpage_entries_3_tag_vpn; // @[TLB.scala:341:30] reg superpage_entries_3_tag_v; // @[TLB.scala:341:30] reg [41:0] superpage_entries_3_data_0; // @[TLB.scala:341:30] wire [41:0] _entries_WIRE_23 = superpage_entries_3_data_0; // @[TLB.scala:170:77, :341:30] reg superpage_entries_3_valid_0; // @[TLB.scala:341:30] reg [1:0] special_entry_level; // @[TLB.scala:346:56] reg [35:0] special_entry_tag_vpn; // @[TLB.scala:346:56] reg special_entry_tag_v; // @[TLB.scala:346:56] reg [41:0] special_entry_data_0; // @[TLB.scala:346:56] wire [41:0] _mpu_ppn_WIRE_1 = special_entry_data_0; // @[TLB.scala:170:77, :346:56] wire [41:0] _entries_WIRE_25 = special_entry_data_0; // @[TLB.scala:170:77, :346:56] reg special_entry_valid_0; // @[TLB.scala:346:56] reg [1:0] state; // @[TLB.scala:352:22] reg [35:0] r_refill_tag; // @[TLB.scala:354:25] assign io_ptw_req_bits_bits_addr_0 = r_refill_tag; // @[TLB.scala:318:7, :354:25] reg [1:0] r_superpage_repl_addr; // @[TLB.scala:355:34] wire [1:0] waddr = r_superpage_repl_addr; // @[TLB.scala:355:34, :477:22] reg [2:0] r_sectored_repl_addr; // @[TLB.scala:356:33] reg r_sectored_hit_valid; // @[TLB.scala:357:27] reg [2:0] r_sectored_hit_bits; // @[TLB.scala:357:27] reg r_superpage_hit_valid; // @[TLB.scala:358:28] reg [1:0] r_superpage_hit_bits; // @[TLB.scala:358:28] reg r_need_gpa; // @[TLB.scala:361:23] assign io_ptw_req_bits_bits_need_gpa_0 = r_need_gpa; // @[TLB.scala:318:7, :361:23] reg r_gpa_valid; // @[TLB.scala:362:24] reg [47:0] r_gpa; // @[TLB.scala:363:18] reg [35:0] r_gpa_vpn; // @[TLB.scala:364:22] reg r_gpa_is_pte; // @[TLB.scala:365:25] wire priv_s = io_req_bits_prv_0[0]; // @[TLB.scala:318:7, :370:20] wire priv_uses_vm = ~(io_req_bits_prv_0[1]); // @[TLB.scala:318:7, :372:27] wire _stage1_en_T = satp_mode[3]; // @[TLB.scala:373:17, :374:41] wire stage1_en = _stage1_en_T; // @[TLB.scala:374:{29,41}] wire _vm_enabled_T = stage1_en; // @[TLB.scala:374:29, :399:31] wire _vm_enabled_T_1 = _vm_enabled_T & priv_uses_vm; // @[TLB.scala:372:27, 
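// vm_enabled requires satp to select a paged mode (satp_mode[3]), a privilege level that uses
// virtual memory, and a non-passthrough request; the mpu_* wires below form the physical address
// fed to the PMA/PMP checkers, taken from the refill PPN during a refill, from the special
// entry's PPN when translation is enabled, or from the untranslated vaddr otherwise.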
:399:{31,45}] wire _vm_enabled_T_2 = ~io_req_bits_passthrough_0; // @[TLB.scala:318:7, :399:64] wire vm_enabled = _vm_enabled_T_1 & _vm_enabled_T_2; // @[TLB.scala:399:{45,61,64}] wire _mpu_ppn_T = vm_enabled; // @[TLB.scala:399:61, :413:32] wire _tlb_miss_T_1 = vm_enabled; // @[TLB.scala:399:61, :613:29] wire _vsatp_mode_mismatch_T_2 = ~io_req_bits_passthrough_0; // @[TLB.scala:318:7, :399:64, :403:81] wire [19:0] refill_ppn = io_ptw_resp_bits_pte_ppn_0[19:0]; // @[TLB.scala:318:7, :406:44] wire [19:0] newEntry_ppn = io_ptw_resp_bits_pte_ppn_0[19:0]; // @[TLB.scala:318:7, :406:44, :449:24] wire _io_resp_miss_T = do_refill; // @[TLB.scala:408:29, :651:29] wire _T_51 = state == 2'h1; // @[package.scala:16:47] wire _invalidate_refill_T; // @[package.scala:16:47] assign _invalidate_refill_T = _T_51; // @[package.scala:16:47] assign _io_ptw_req_valid_T = _T_51; // @[package.scala:16:47] wire _invalidate_refill_T_1 = &state; // @[package.scala:16:47] wire _invalidate_refill_T_2 = _invalidate_refill_T | _invalidate_refill_T_1; // @[package.scala:16:47, :81:59] wire invalidate_refill = _invalidate_refill_T_2 | io_sfence_valid_0; // @[package.scala:81:59] wire [19:0] _mpu_ppn_T_23; // @[TLB.scala:170:77] wire _mpu_ppn_T_22; // @[TLB.scala:170:77] wire _mpu_ppn_T_21; // @[TLB.scala:170:77] wire _mpu_ppn_T_20; // @[TLB.scala:170:77] wire _mpu_ppn_T_19; // @[TLB.scala:170:77] wire _mpu_ppn_T_18; // @[TLB.scala:170:77] wire _mpu_ppn_T_17; // @[TLB.scala:170:77] wire _mpu_ppn_T_16; // @[TLB.scala:170:77] wire _mpu_ppn_T_15; // @[TLB.scala:170:77] wire _mpu_ppn_T_14; // @[TLB.scala:170:77] wire _mpu_ppn_T_13; // @[TLB.scala:170:77] wire _mpu_ppn_T_12; // @[TLB.scala:170:77] wire _mpu_ppn_T_11; // @[TLB.scala:170:77] wire _mpu_ppn_T_10; // @[TLB.scala:170:77] wire _mpu_ppn_T_9; // @[TLB.scala:170:77] wire _mpu_ppn_T_8; // @[TLB.scala:170:77] wire _mpu_ppn_T_7; // @[TLB.scala:170:77] wire _mpu_ppn_T_6; // @[TLB.scala:170:77] wire _mpu_ppn_T_5; // @[TLB.scala:170:77] wire _mpu_ppn_T_4; // @[TLB.scala:170:77] wire _mpu_ppn_T_3; // @[TLB.scala:170:77] wire _mpu_ppn_T_2; // @[TLB.scala:170:77] wire _mpu_ppn_T_1; // @[TLB.scala:170:77] assign _mpu_ppn_T_1 = _mpu_ppn_WIRE_1[0]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_fragmented_superpage = _mpu_ppn_T_1; // @[TLB.scala:170:77] assign _mpu_ppn_T_2 = _mpu_ppn_WIRE_1[1]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_c = _mpu_ppn_T_2; // @[TLB.scala:170:77] assign _mpu_ppn_T_3 = _mpu_ppn_WIRE_1[2]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_eff = _mpu_ppn_T_3; // @[TLB.scala:170:77] assign _mpu_ppn_T_4 = _mpu_ppn_WIRE_1[3]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_paa = _mpu_ppn_T_4; // @[TLB.scala:170:77] assign _mpu_ppn_T_5 = _mpu_ppn_WIRE_1[4]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_pal = _mpu_ppn_T_5; // @[TLB.scala:170:77] assign _mpu_ppn_T_6 = _mpu_ppn_WIRE_1[5]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_ppp = _mpu_ppn_T_6; // @[TLB.scala:170:77] assign _mpu_ppn_T_7 = _mpu_ppn_WIRE_1[6]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_pr = _mpu_ppn_T_7; // @[TLB.scala:170:77] assign _mpu_ppn_T_8 = _mpu_ppn_WIRE_1[7]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_px = _mpu_ppn_T_8; // @[TLB.scala:170:77] assign _mpu_ppn_T_9 = _mpu_ppn_WIRE_1[8]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_pw = _mpu_ppn_T_9; // @[TLB.scala:170:77] assign _mpu_ppn_T_10 = _mpu_ppn_WIRE_1[9]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_hr = _mpu_ppn_T_10; // @[TLB.scala:170:77] assign _mpu_ppn_T_11 = _mpu_ppn_WIRE_1[10]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_hx = _mpu_ppn_T_11; // 
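// The _mpu_ppn_WIRE_* wires unpack the special entry's packed 42-bit data word (TLB.scala:170)
// into its individual permission/attribute flags, with the 20-bit PPN held in bits [41:22].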
@[TLB.scala:170:77] assign _mpu_ppn_T_12 = _mpu_ppn_WIRE_1[11]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_hw = _mpu_ppn_T_12; // @[TLB.scala:170:77] assign _mpu_ppn_T_13 = _mpu_ppn_WIRE_1[12]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_sr = _mpu_ppn_T_13; // @[TLB.scala:170:77] assign _mpu_ppn_T_14 = _mpu_ppn_WIRE_1[13]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_sx = _mpu_ppn_T_14; // @[TLB.scala:170:77] assign _mpu_ppn_T_15 = _mpu_ppn_WIRE_1[14]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_sw = _mpu_ppn_T_15; // @[TLB.scala:170:77] assign _mpu_ppn_T_16 = _mpu_ppn_WIRE_1[15]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_gf = _mpu_ppn_T_16; // @[TLB.scala:170:77] assign _mpu_ppn_T_17 = _mpu_ppn_WIRE_1[16]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_pf = _mpu_ppn_T_17; // @[TLB.scala:170:77] assign _mpu_ppn_T_18 = _mpu_ppn_WIRE_1[17]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_ae_stage2 = _mpu_ppn_T_18; // @[TLB.scala:170:77] assign _mpu_ppn_T_19 = _mpu_ppn_WIRE_1[18]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_ae_final = _mpu_ppn_T_19; // @[TLB.scala:170:77] assign _mpu_ppn_T_20 = _mpu_ppn_WIRE_1[19]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_ae_ptw = _mpu_ppn_T_20; // @[TLB.scala:170:77] assign _mpu_ppn_T_21 = _mpu_ppn_WIRE_1[20]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_g = _mpu_ppn_T_21; // @[TLB.scala:170:77] assign _mpu_ppn_T_22 = _mpu_ppn_WIRE_1[21]; // @[TLB.scala:170:77] wire _mpu_ppn_WIRE_u = _mpu_ppn_T_22; // @[TLB.scala:170:77] assign _mpu_ppn_T_23 = _mpu_ppn_WIRE_1[41:22]; // @[TLB.scala:170:77] wire [19:0] _mpu_ppn_WIRE_ppn = _mpu_ppn_T_23; // @[TLB.scala:170:77] wire _GEN = special_entry_level == 2'h0; // @[TLB.scala:197:28, :346:56] wire _mpu_ppn_ignore_T; // @[TLB.scala:197:28] assign _mpu_ppn_ignore_T = _GEN; // @[TLB.scala:197:28] wire _hitsVec_ignore_T_17; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_17 = _GEN; // @[TLB.scala:182:28, :197:28] wire _ppn_ignore_T_12; // @[TLB.scala:197:28] assign _ppn_ignore_T_12 = _GEN; // @[TLB.scala:197:28] wire _ignore_T_17; // @[TLB.scala:182:28] assign _ignore_T_17 = _GEN; // @[TLB.scala:182:28, :197:28] wire mpu_ppn_ignore = _mpu_ppn_ignore_T; // @[TLB.scala:197:{28,34}] wire [35:0] _mpu_ppn_T_24 = mpu_ppn_ignore ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _mpu_ppn_T_25 = {_mpu_ppn_T_24[35:20], _mpu_ppn_T_24[19:0] | _mpu_ppn_barrier_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _mpu_ppn_T_26 = _mpu_ppn_T_25[26:18]; // @[TLB.scala:198:{47,58}] wire [9:0] _mpu_ppn_T_27 = {1'h0, _mpu_ppn_T_26}; // @[TLB.scala:198:{18,58}] wire _mpu_ppn_ignore_T_1 = ~(special_entry_level[1]); // @[TLB.scala:197:28, :346:56] wire mpu_ppn_ignore_1 = _mpu_ppn_ignore_T_1; // @[TLB.scala:197:{28,34}] wire [35:0] _mpu_ppn_T_28 = mpu_ppn_ignore_1 ? 
vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _mpu_ppn_T_29 = {_mpu_ppn_T_28[35:20], _mpu_ppn_T_28[19:0] | _mpu_ppn_barrier_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _mpu_ppn_T_30 = _mpu_ppn_T_29[17:9]; // @[TLB.scala:198:{47,58}] wire [18:0] _mpu_ppn_T_31 = {_mpu_ppn_T_27, _mpu_ppn_T_30}; // @[TLB.scala:198:{18,58}] wire _GEN_0 = special_entry_level != 2'h3; // @[TLB.scala:197:28, :346:56] wire _mpu_ppn_ignore_T_2; // @[TLB.scala:197:28] assign _mpu_ppn_ignore_T_2 = _GEN_0; // @[TLB.scala:197:28] wire _hitsVec_ignore_T_19; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_19 = _GEN_0; // @[TLB.scala:182:28, :197:28] wire _ppn_ignore_T_14; // @[TLB.scala:197:28] assign _ppn_ignore_T_14 = _GEN_0; // @[TLB.scala:197:28] wire _ignore_T_19; // @[TLB.scala:182:28] assign _ignore_T_19 = _GEN_0; // @[TLB.scala:182:28, :197:28] wire mpu_ppn_ignore_2 = _mpu_ppn_ignore_T_2; // @[TLB.scala:197:{28,34}] wire [35:0] _mpu_ppn_T_32 = mpu_ppn_ignore_2 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _mpu_ppn_T_33 = {_mpu_ppn_T_32[35:20], _mpu_ppn_T_32[19:0] | _mpu_ppn_barrier_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _mpu_ppn_T_34 = _mpu_ppn_T_33[8:0]; // @[TLB.scala:198:{47,58}] wire [27:0] _mpu_ppn_T_35 = {_mpu_ppn_T_31, _mpu_ppn_T_34}; // @[TLB.scala:198:{18,58}] wire [36:0] _mpu_ppn_T_36 = io_req_bits_vaddr_0[48:12]; // @[TLB.scala:318:7, :413:146] wire [36:0] _mpu_ppn_T_37 = _mpu_ppn_T ? {9'h0, _mpu_ppn_T_35} : _mpu_ppn_T_36; // @[TLB.scala:198:18, :413:{20,32,146}] wire [36:0] mpu_ppn = do_refill ? {17'h0, refill_ppn} : _mpu_ppn_T_37; // @[TLB.scala:406:44, :408:29, :412:20, :413:20] wire [11:0] _mpu_physaddr_T = io_req_bits_vaddr_0[11:0]; // @[TLB.scala:318:7, :414:52] wire [11:0] _io_resp_paddr_T = io_req_bits_vaddr_0[11:0]; // @[TLB.scala:318:7, :414:52, :652:46] wire [11:0] _io_resp_gpa_offset_T_1 = io_req_bits_vaddr_0[11:0]; // @[TLB.scala:318:7, :414:52, :658:82] wire [48:0] mpu_physaddr = {mpu_ppn, _mpu_physaddr_T}; // @[TLB.scala:412:20, :414:{25,52}] wire [48:0] _homogeneous_T = mpu_physaddr; // @[TLB.scala:414:25] wire [48:0] _homogeneous_T_67 = mpu_physaddr; // @[TLB.scala:414:25] wire [48:0] _deny_access_to_debug_T_1 = mpu_physaddr; // @[TLB.scala:414:25] wire _mpu_priv_T = do_refill | io_req_bits_passthrough_0; // @[TLB.scala:318:7, :408:29, :415:52] wire _mpu_priv_T_1 = _mpu_priv_T; // @[TLB.scala:415:{38,52}] wire [2:0] _mpu_priv_T_2 = {io_ptw_status_debug_0, io_req_bits_prv_0}; // @[TLB.scala:318:7, :415:103] wire [2:0] mpu_priv = _mpu_priv_T_1 ? 
3'h1 : _mpu_priv_T_2; // @[TLB.scala:415:{27,38,103}] wire cacheable; // @[TLB.scala:425:41] wire newEntry_c = cacheable; // @[TLB.scala:425:41, :449:24] wire [49:0] _homogeneous_T_1 = {1'h0, _homogeneous_T}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_2 = _homogeneous_T_1 & 50'h3FFFFFFFFE000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_3 = _homogeneous_T_2; // @[Parameters.scala:137:46] wire _homogeneous_T_4 = _homogeneous_T_3 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_50 = _homogeneous_T_4; // @[TLBPermissions.scala:101:65] wire [48:0] _GEN_1 = {mpu_physaddr[48:14], mpu_physaddr[13:0] ^ 14'h3000}; // @[TLB.scala:414:25] wire [48:0] _homogeneous_T_5; // @[Parameters.scala:137:31] assign _homogeneous_T_5 = _GEN_1; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_72; // @[Parameters.scala:137:31] assign _homogeneous_T_72 = _GEN_1; // @[Parameters.scala:137:31] wire [49:0] _homogeneous_T_6 = {1'h0, _homogeneous_T_5}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_7 = _homogeneous_T_6 & 50'h3FFFFFFFFF000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_8 = _homogeneous_T_7; // @[Parameters.scala:137:46] wire _homogeneous_T_9 = _homogeneous_T_8 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [48:0] _GEN_2 = {mpu_physaddr[48:17], mpu_physaddr[16:0] ^ 17'h10000}; // @[TLB.scala:414:25] wire [48:0] _homogeneous_T_10; // @[Parameters.scala:137:31] assign _homogeneous_T_10 = _GEN_2; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_60; // @[Parameters.scala:137:31] assign _homogeneous_T_60 = _GEN_2; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_77; // @[Parameters.scala:137:31] assign _homogeneous_T_77 = _GEN_2; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_109; // @[Parameters.scala:137:31] assign _homogeneous_T_109 = _GEN_2; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_116; // @[Parameters.scala:137:31] assign _homogeneous_T_116 = _GEN_2; // @[Parameters.scala:137:31] wire [49:0] _homogeneous_T_11 = {1'h0, _homogeneous_T_10}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_12 = _homogeneous_T_11 & 50'h3FFFFFFFF0000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_13 = _homogeneous_T_12; // @[Parameters.scala:137:46] wire _homogeneous_T_14 = _homogeneous_T_13 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [48:0] _homogeneous_T_15 = {mpu_physaddr[48:21], mpu_physaddr[20:0] ^ 21'h100000}; // @[TLB.scala:414:25] wire [49:0] _homogeneous_T_16 = {1'h0, _homogeneous_T_15}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_17 = _homogeneous_T_16 & 50'h3FFFFFFFEF000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_18 = _homogeneous_T_17; // @[Parameters.scala:137:46] wire _homogeneous_T_19 = _homogeneous_T_18 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [48:0] _homogeneous_T_20 = {mpu_physaddr[48:26], mpu_physaddr[25:0] ^ 26'h2000000}; // @[TLB.scala:414:25] wire [49:0] _homogeneous_T_21 = {1'h0, _homogeneous_T_20}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_22 = _homogeneous_T_21 & 50'h3FFFFFFFF0000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_23 = _homogeneous_T_22; // @[Parameters.scala:137:46] wire _homogeneous_T_24 = _homogeneous_T_23 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [48:0] _homogeneous_T_25 = {mpu_physaddr[48:26], mpu_physaddr[25:0] ^ 26'h2010000}; // @[TLB.scala:414:25] wire [49:0] _homogeneous_T_26 = {1'h0, _homogeneous_T_25}; // 
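// The _homogeneous_T_* chain masks and compares mpu_physaddr against each region of the physical
// address map (Parameters.scala:137); the OR of the page-aligned matches (TLBPermissions.scala)
// determines whether the whole page has uniform memory attributes ("homogeneous").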
@[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_27 = _homogeneous_T_26 & 50'h3FFFFFFFFF000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_28 = _homogeneous_T_27; // @[Parameters.scala:137:46] wire _homogeneous_T_29 = _homogeneous_T_28 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [48:0] _GEN_3 = {mpu_physaddr[48:28], mpu_physaddr[27:0] ^ 28'h8000000}; // @[TLB.scala:414:25] wire [48:0] _homogeneous_T_30; // @[Parameters.scala:137:31] assign _homogeneous_T_30 = _GEN_3; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_82; // @[Parameters.scala:137:31] assign _homogeneous_T_82 = _GEN_3; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_97; // @[Parameters.scala:137:31] assign _homogeneous_T_97 = _GEN_3; // @[Parameters.scala:137:31] wire [49:0] _homogeneous_T_31 = {1'h0, _homogeneous_T_30}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_32 = _homogeneous_T_31 & 50'h3FFFFFFFF0000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_33 = _homogeneous_T_32; // @[Parameters.scala:137:46] wire _homogeneous_T_34 = _homogeneous_T_33 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [48:0] _homogeneous_T_35 = {mpu_physaddr[48:28], mpu_physaddr[27:0] ^ 28'hC000000}; // @[TLB.scala:414:25] wire [49:0] _homogeneous_T_36 = {1'h0, _homogeneous_T_35}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_37 = _homogeneous_T_36 & 50'h3FFFFFC000000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_38 = _homogeneous_T_37; // @[Parameters.scala:137:46] wire _homogeneous_T_39 = _homogeneous_T_38 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [48:0] _homogeneous_T_40 = {mpu_physaddr[48:29], mpu_physaddr[28:0] ^ 29'h10020000}; // @[TLB.scala:414:25] wire [49:0] _homogeneous_T_41 = {1'h0, _homogeneous_T_40}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_42 = _homogeneous_T_41 & 50'h3FFFFFFFFF000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_43 = _homogeneous_T_42; // @[Parameters.scala:137:46] wire _homogeneous_T_44 = _homogeneous_T_43 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [48:0] _GEN_4 = {mpu_physaddr[48:32], mpu_physaddr[31:0] ^ 32'h80000000}; // @[TLB.scala:414:25, :417:15] wire [48:0] _homogeneous_T_45; // @[Parameters.scala:137:31] assign _homogeneous_T_45 = _GEN_4; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_87; // @[Parameters.scala:137:31] assign _homogeneous_T_87 = _GEN_4; // @[Parameters.scala:137:31] wire [48:0] _homogeneous_T_102; // @[Parameters.scala:137:31] assign _homogeneous_T_102 = _GEN_4; // @[Parameters.scala:137:31] wire [49:0] _homogeneous_T_46 = {1'h0, _homogeneous_T_45}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_47 = _homogeneous_T_46 & 50'h3FFFFF0000000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_48 = _homogeneous_T_47; // @[Parameters.scala:137:46] wire _homogeneous_T_49 = _homogeneous_T_48 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_51 = _homogeneous_T_50 | _homogeneous_T_9; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_52 = _homogeneous_T_51 | _homogeneous_T_14; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_53 = _homogeneous_T_52 | _homogeneous_T_19; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_54 = _homogeneous_T_53 | _homogeneous_T_24; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_55 = _homogeneous_T_54 | _homogeneous_T_29; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_56 = _homogeneous_T_55 | _homogeneous_T_34; // 
@[TLBPermissions.scala:101:65] wire _homogeneous_T_57 = _homogeneous_T_56 | _homogeneous_T_39; // @[TLBPermissions.scala:101:65] wire _homogeneous_T_58 = _homogeneous_T_57 | _homogeneous_T_44; // @[TLBPermissions.scala:101:65] wire homogeneous = _homogeneous_T_58 | _homogeneous_T_49; // @[TLBPermissions.scala:101:65] wire [49:0] _homogeneous_T_61 = {1'h0, _homogeneous_T_60}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_62 = _homogeneous_T_61 & 50'h8A110000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_63 = _homogeneous_T_62; // @[Parameters.scala:137:46] wire _homogeneous_T_64 = _homogeneous_T_63 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_65 = _homogeneous_T_64; // @[TLBPermissions.scala:87:66] wire _homogeneous_T_66 = ~_homogeneous_T_65; // @[TLBPermissions.scala:87:{22,66}] wire [49:0] _homogeneous_T_68 = {1'h0, _homogeneous_T_67}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_69 = _homogeneous_T_68 & 50'h9E113000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_70 = _homogeneous_T_69; // @[Parameters.scala:137:46] wire _homogeneous_T_71 = _homogeneous_T_70 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_92 = _homogeneous_T_71; // @[TLBPermissions.scala:85:66] wire [49:0] _homogeneous_T_73 = {1'h0, _homogeneous_T_72}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_74 = _homogeneous_T_73 & 50'h9E113000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_75 = _homogeneous_T_74; // @[Parameters.scala:137:46] wire _homogeneous_T_76 = _homogeneous_T_75 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [49:0] _homogeneous_T_78 = {1'h0, _homogeneous_T_77}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_79 = _homogeneous_T_78 & 50'h9E110000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_80 = _homogeneous_T_79; // @[Parameters.scala:137:46] wire _homogeneous_T_81 = _homogeneous_T_80 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [49:0] _homogeneous_T_83 = {1'h0, _homogeneous_T_82}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_84 = _homogeneous_T_83 & 50'h9E110000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_85 = _homogeneous_T_84; // @[Parameters.scala:137:46] wire _homogeneous_T_86 = _homogeneous_T_85 == 50'h0; // @[Parameters.scala:137:{46,59}] wire [49:0] _homogeneous_T_88 = {1'h0, _homogeneous_T_87}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_89 = _homogeneous_T_88 & 50'h90000000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_90 = _homogeneous_T_89; // @[Parameters.scala:137:46] wire _homogeneous_T_91 = _homogeneous_T_90 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_93 = _homogeneous_T_92 | _homogeneous_T_76; // @[TLBPermissions.scala:85:66] wire _homogeneous_T_94 = _homogeneous_T_93 | _homogeneous_T_81; // @[TLBPermissions.scala:85:66] wire _homogeneous_T_95 = _homogeneous_T_94 | _homogeneous_T_86; // @[TLBPermissions.scala:85:66] wire _homogeneous_T_96 = _homogeneous_T_95 | _homogeneous_T_91; // @[TLBPermissions.scala:85:66] wire [49:0] _homogeneous_T_98 = {1'h0, _homogeneous_T_97}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_99 = _homogeneous_T_98 & 50'h8E000000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_100 = _homogeneous_T_99; // @[Parameters.scala:137:46] wire _homogeneous_T_101 = _homogeneous_T_100 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_107 = _homogeneous_T_101; // 
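// deny_access_to_debug and the prot_r/w/x wires below combine the PMA checker responses with a
// debug-region filter and the PMP unit's permissions; these become the access rights recorded
// for a refilled entry (newEntry_pr/pw/px).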
@[TLBPermissions.scala:85:66] wire [49:0] _homogeneous_T_103 = {1'h0, _homogeneous_T_102}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_104 = _homogeneous_T_103 & 50'h80000000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_105 = _homogeneous_T_104; // @[Parameters.scala:137:46] wire _homogeneous_T_106 = _homogeneous_T_105 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_108 = _homogeneous_T_107 | _homogeneous_T_106; // @[TLBPermissions.scala:85:66] wire [49:0] _homogeneous_T_110 = {1'h0, _homogeneous_T_109}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_111 = _homogeneous_T_110 & 50'h8A110000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_112 = _homogeneous_T_111; // @[Parameters.scala:137:46] wire _homogeneous_T_113 = _homogeneous_T_112 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_114 = _homogeneous_T_113; // @[TLBPermissions.scala:87:66] wire _homogeneous_T_115 = ~_homogeneous_T_114; // @[TLBPermissions.scala:87:{22,66}] wire [49:0] _homogeneous_T_117 = {1'h0, _homogeneous_T_116}; // @[Parameters.scala:137:{31,41}] wire [49:0] _homogeneous_T_118 = _homogeneous_T_117 & 50'h8A110000; // @[Parameters.scala:137:{41,46}] wire [49:0] _homogeneous_T_119 = _homogeneous_T_118; // @[Parameters.scala:137:46] wire _homogeneous_T_120 = _homogeneous_T_119 == 50'h0; // @[Parameters.scala:137:{46,59}] wire _homogeneous_T_121 = _homogeneous_T_120; // @[TLBPermissions.scala:87:66] wire _homogeneous_T_122 = ~_homogeneous_T_121; // @[TLBPermissions.scala:87:{22,66}] wire _deny_access_to_debug_T = ~(mpu_priv[2]); // @[TLB.scala:415:27, :428:39] wire [49:0] _deny_access_to_debug_T_2 = {1'h0, _deny_access_to_debug_T_1}; // @[Parameters.scala:137:{31,41}] wire [49:0] _deny_access_to_debug_T_3 = _deny_access_to_debug_T_2 & 50'h3FFFFFFFFF000; // @[Parameters.scala:137:{41,46}] wire [49:0] _deny_access_to_debug_T_4 = _deny_access_to_debug_T_3; // @[Parameters.scala:137:46] wire _deny_access_to_debug_T_5 = _deny_access_to_debug_T_4 == 50'h0; // @[Parameters.scala:137:{46,59}] wire deny_access_to_debug = _deny_access_to_debug_T & _deny_access_to_debug_T_5; // @[TLB.scala:428:{39,50}] wire _prot_r_T = ~deny_access_to_debug; // @[TLB.scala:428:50, :429:33] wire _prot_r_T_1 = _pma_io_resp_r & _prot_r_T; // @[TLB.scala:422:19, :429:{30,33}] wire prot_r = _prot_r_T_1 & _pmp_io_r; // @[TLB.scala:416:19, :429:{30,55}] wire newEntry_pr = prot_r; // @[TLB.scala:429:55, :449:24] wire _prot_w_T = ~deny_access_to_debug; // @[TLB.scala:428:50, :429:33, :430:33] wire _prot_w_T_1 = _pma_io_resp_w & _prot_w_T; // @[TLB.scala:422:19, :430:{30,33}] wire prot_w = _prot_w_T_1 & _pmp_io_w; // @[TLB.scala:416:19, :430:{30,55}] wire newEntry_pw = prot_w; // @[TLB.scala:430:55, :449:24] wire _prot_x_T = ~deny_access_to_debug; // @[TLB.scala:428:50, :429:33, :434:33] wire _prot_x_T_1 = _pma_io_resp_x & _prot_x_T; // @[TLB.scala:422:19, :434:{30,33}] wire prot_x = _prot_x_T_1 & _pmp_io_x; // @[TLB.scala:416:19, :434:{30,55}] wire newEntry_px = prot_x; // @[TLB.scala:434:55, :449:24] wire _GEN_5 = sectored_entries_0_0_valid_0 | sectored_entries_0_0_valid_1; // @[package.scala:81:59] wire _sector_hits_T; // @[package.scala:81:59] assign _sector_hits_T = _GEN_5; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T; // @[package.scala:81:59] assign _r_sectored_repl_addr_valids_T = _GEN_5; // @[package.scala:81:59] wire _sector_hits_T_1 = _sector_hits_T | sectored_entries_0_0_valid_2; // @[package.scala:81:59] wire _sector_hits_T_2 = 
_sector_hits_T_1 | sectored_entries_0_0_valid_3; // @[package.scala:81:59] wire [35:0] _T_176 = sectored_entries_0_0_tag_vpn ^ vpn; // @[TLB.scala:174:61, :335:30, :339:29] wire [35:0] _sector_hits_T_3; // @[TLB.scala:174:61] assign _sector_hits_T_3 = _T_176; // @[TLB.scala:174:61] wire [35:0] _hitsVec_T; // @[TLB.scala:174:61] assign _hitsVec_T = _T_176; // @[TLB.scala:174:61] wire [33:0] _sector_hits_T_4 = _sector_hits_T_3[35:2]; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_5 = _sector_hits_T_4 == 34'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_6 = ~sectored_entries_0_0_tag_v; // @[TLB.scala:174:105, :339:29] wire _sector_hits_T_7 = _sector_hits_T_5 & _sector_hits_T_6; // @[TLB.scala:174:{86,95,105}] wire sector_hits_0 = _sector_hits_T_2 & _sector_hits_T_7; // @[package.scala:81:59] wire _GEN_6 = sectored_entries_0_1_valid_0 | sectored_entries_0_1_valid_1; // @[package.scala:81:59] wire _sector_hits_T_8; // @[package.scala:81:59] assign _sector_hits_T_8 = _GEN_6; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_3; // @[package.scala:81:59] assign _r_sectored_repl_addr_valids_T_3 = _GEN_6; // @[package.scala:81:59] wire _sector_hits_T_9 = _sector_hits_T_8 | sectored_entries_0_1_valid_2; // @[package.scala:81:59] wire _sector_hits_T_10 = _sector_hits_T_9 | sectored_entries_0_1_valid_3; // @[package.scala:81:59] wire [35:0] _T_597 = sectored_entries_0_1_tag_vpn ^ vpn; // @[TLB.scala:174:61, :335:30, :339:29] wire [35:0] _sector_hits_T_11; // @[TLB.scala:174:61] assign _sector_hits_T_11 = _T_597; // @[TLB.scala:174:61] wire [35:0] _hitsVec_T_6; // @[TLB.scala:174:61] assign _hitsVec_T_6 = _T_597; // @[TLB.scala:174:61] wire [33:0] _sector_hits_T_12 = _sector_hits_T_11[35:2]; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_13 = _sector_hits_T_12 == 34'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_14 = ~sectored_entries_0_1_tag_v; // @[TLB.scala:174:105, :339:29] wire _sector_hits_T_15 = _sector_hits_T_13 & _sector_hits_T_14; // @[TLB.scala:174:{86,95,105}] wire sector_hits_1 = _sector_hits_T_10 & _sector_hits_T_15; // @[package.scala:81:59] wire _GEN_7 = sectored_entries_0_2_valid_0 | sectored_entries_0_2_valid_1; // @[package.scala:81:59] wire _sector_hits_T_16; // @[package.scala:81:59] assign _sector_hits_T_16 = _GEN_7; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_6; // @[package.scala:81:59] assign _r_sectored_repl_addr_valids_T_6 = _GEN_7; // @[package.scala:81:59] wire _sector_hits_T_17 = _sector_hits_T_16 | sectored_entries_0_2_valid_2; // @[package.scala:81:59] wire _sector_hits_T_18 = _sector_hits_T_17 | sectored_entries_0_2_valid_3; // @[package.scala:81:59] wire [35:0] _T_1018 = sectored_entries_0_2_tag_vpn ^ vpn; // @[TLB.scala:174:61, :335:30, :339:29] wire [35:0] _sector_hits_T_19; // @[TLB.scala:174:61] assign _sector_hits_T_19 = _T_1018; // @[TLB.scala:174:61] wire [35:0] _hitsVec_T_12; // @[TLB.scala:174:61] assign _hitsVec_T_12 = _T_1018; // @[TLB.scala:174:61] wire [33:0] _sector_hits_T_20 = _sector_hits_T_19[35:2]; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_21 = _sector_hits_T_20 == 34'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_22 = ~sectored_entries_0_2_tag_v; // @[TLB.scala:174:105, :339:29] wire _sector_hits_T_23 = _sector_hits_T_21 & _sector_hits_T_22; // @[TLB.scala:174:{86,95,105}] wire sector_hits_2 = _sector_hits_T_18 & _sector_hits_T_23; // @[package.scala:81:59] wire _GEN_8 = sectored_entries_0_3_valid_0 | sectored_entries_0_3_valid_1; // @[package.scala:81:59] wire _sector_hits_T_24; // 
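// sector_hits_N (TLB.scala:174): a sectored entry hits when any of its four sub-entries is valid,
// its tag_v bit is clear, and the request VPN matches the entry's tag in all but the low two
// (sub-entry index) bits; the superpage_hits_N terms further below additionally ignore the VPN
// bits below the entry's level.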
@[package.scala:81:59] assign _sector_hits_T_24 = _GEN_8; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_9; // @[package.scala:81:59] assign _r_sectored_repl_addr_valids_T_9 = _GEN_8; // @[package.scala:81:59] wire _sector_hits_T_25 = _sector_hits_T_24 | sectored_entries_0_3_valid_2; // @[package.scala:81:59] wire _sector_hits_T_26 = _sector_hits_T_25 | sectored_entries_0_3_valid_3; // @[package.scala:81:59] wire [35:0] _T_1439 = sectored_entries_0_3_tag_vpn ^ vpn; // @[TLB.scala:174:61, :335:30, :339:29] wire [35:0] _sector_hits_T_27; // @[TLB.scala:174:61] assign _sector_hits_T_27 = _T_1439; // @[TLB.scala:174:61] wire [35:0] _hitsVec_T_18; // @[TLB.scala:174:61] assign _hitsVec_T_18 = _T_1439; // @[TLB.scala:174:61] wire [33:0] _sector_hits_T_28 = _sector_hits_T_27[35:2]; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_29 = _sector_hits_T_28 == 34'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_30 = ~sectored_entries_0_3_tag_v; // @[TLB.scala:174:105, :339:29] wire _sector_hits_T_31 = _sector_hits_T_29 & _sector_hits_T_30; // @[TLB.scala:174:{86,95,105}] wire sector_hits_3 = _sector_hits_T_26 & _sector_hits_T_31; // @[package.scala:81:59] wire _GEN_9 = sectored_entries_0_4_valid_0 | sectored_entries_0_4_valid_1; // @[package.scala:81:59] wire _sector_hits_T_32; // @[package.scala:81:59] assign _sector_hits_T_32 = _GEN_9; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_12; // @[package.scala:81:59] assign _r_sectored_repl_addr_valids_T_12 = _GEN_9; // @[package.scala:81:59] wire _sector_hits_T_33 = _sector_hits_T_32 | sectored_entries_0_4_valid_2; // @[package.scala:81:59] wire _sector_hits_T_34 = _sector_hits_T_33 | sectored_entries_0_4_valid_3; // @[package.scala:81:59] wire [35:0] _T_1860 = sectored_entries_0_4_tag_vpn ^ vpn; // @[TLB.scala:174:61, :335:30, :339:29] wire [35:0] _sector_hits_T_35; // @[TLB.scala:174:61] assign _sector_hits_T_35 = _T_1860; // @[TLB.scala:174:61] wire [35:0] _hitsVec_T_24; // @[TLB.scala:174:61] assign _hitsVec_T_24 = _T_1860; // @[TLB.scala:174:61] wire [33:0] _sector_hits_T_36 = _sector_hits_T_35[35:2]; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_37 = _sector_hits_T_36 == 34'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_38 = ~sectored_entries_0_4_tag_v; // @[TLB.scala:174:105, :339:29] wire _sector_hits_T_39 = _sector_hits_T_37 & _sector_hits_T_38; // @[TLB.scala:174:{86,95,105}] wire sector_hits_4 = _sector_hits_T_34 & _sector_hits_T_39; // @[package.scala:81:59] wire _GEN_10 = sectored_entries_0_5_valid_0 | sectored_entries_0_5_valid_1; // @[package.scala:81:59] wire _sector_hits_T_40; // @[package.scala:81:59] assign _sector_hits_T_40 = _GEN_10; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_15; // @[package.scala:81:59] assign _r_sectored_repl_addr_valids_T_15 = _GEN_10; // @[package.scala:81:59] wire _sector_hits_T_41 = _sector_hits_T_40 | sectored_entries_0_5_valid_2; // @[package.scala:81:59] wire _sector_hits_T_42 = _sector_hits_T_41 | sectored_entries_0_5_valid_3; // @[package.scala:81:59] wire [35:0] _T_2281 = sectored_entries_0_5_tag_vpn ^ vpn; // @[TLB.scala:174:61, :335:30, :339:29] wire [35:0] _sector_hits_T_43; // @[TLB.scala:174:61] assign _sector_hits_T_43 = _T_2281; // @[TLB.scala:174:61] wire [35:0] _hitsVec_T_30; // @[TLB.scala:174:61] assign _hitsVec_T_30 = _T_2281; // @[TLB.scala:174:61] wire [33:0] _sector_hits_T_44 = _sector_hits_T_43[35:2]; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_45 = _sector_hits_T_44 == 34'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_46 
= ~sectored_entries_0_5_tag_v; // @[TLB.scala:174:105, :339:29] wire _sector_hits_T_47 = _sector_hits_T_45 & _sector_hits_T_46; // @[TLB.scala:174:{86,95,105}] wire sector_hits_5 = _sector_hits_T_42 & _sector_hits_T_47; // @[package.scala:81:59] wire _GEN_11 = sectored_entries_0_6_valid_0 | sectored_entries_0_6_valid_1; // @[package.scala:81:59] wire _sector_hits_T_48; // @[package.scala:81:59] assign _sector_hits_T_48 = _GEN_11; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_18; // @[package.scala:81:59] assign _r_sectored_repl_addr_valids_T_18 = _GEN_11; // @[package.scala:81:59] wire _sector_hits_T_49 = _sector_hits_T_48 | sectored_entries_0_6_valid_2; // @[package.scala:81:59] wire _sector_hits_T_50 = _sector_hits_T_49 | sectored_entries_0_6_valid_3; // @[package.scala:81:59] wire [35:0] _T_2702 = sectored_entries_0_6_tag_vpn ^ vpn; // @[TLB.scala:174:61, :335:30, :339:29] wire [35:0] _sector_hits_T_51; // @[TLB.scala:174:61] assign _sector_hits_T_51 = _T_2702; // @[TLB.scala:174:61] wire [35:0] _hitsVec_T_36; // @[TLB.scala:174:61] assign _hitsVec_T_36 = _T_2702; // @[TLB.scala:174:61] wire [33:0] _sector_hits_T_52 = _sector_hits_T_51[35:2]; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_53 = _sector_hits_T_52 == 34'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_54 = ~sectored_entries_0_6_tag_v; // @[TLB.scala:174:105, :339:29] wire _sector_hits_T_55 = _sector_hits_T_53 & _sector_hits_T_54; // @[TLB.scala:174:{86,95,105}] wire sector_hits_6 = _sector_hits_T_50 & _sector_hits_T_55; // @[package.scala:81:59] wire _GEN_12 = sectored_entries_0_7_valid_0 | sectored_entries_0_7_valid_1; // @[package.scala:81:59] wire _sector_hits_T_56; // @[package.scala:81:59] assign _sector_hits_T_56 = _GEN_12; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_21; // @[package.scala:81:59] assign _r_sectored_repl_addr_valids_T_21 = _GEN_12; // @[package.scala:81:59] wire _sector_hits_T_57 = _sector_hits_T_56 | sectored_entries_0_7_valid_2; // @[package.scala:81:59] wire _sector_hits_T_58 = _sector_hits_T_57 | sectored_entries_0_7_valid_3; // @[package.scala:81:59] wire [35:0] _T_3123 = sectored_entries_0_7_tag_vpn ^ vpn; // @[TLB.scala:174:61, :335:30, :339:29] wire [35:0] _sector_hits_T_59; // @[TLB.scala:174:61] assign _sector_hits_T_59 = _T_3123; // @[TLB.scala:174:61] wire [35:0] _hitsVec_T_42; // @[TLB.scala:174:61] assign _hitsVec_T_42 = _T_3123; // @[TLB.scala:174:61] wire [33:0] _sector_hits_T_60 = _sector_hits_T_59[35:2]; // @[TLB.scala:174:{61,68}] wire _sector_hits_T_61 = _sector_hits_T_60 == 34'h0; // @[TLB.scala:174:{68,86}] wire _sector_hits_T_62 = ~sectored_entries_0_7_tag_v; // @[TLB.scala:174:105, :339:29] wire _sector_hits_T_63 = _sector_hits_T_61 & _sector_hits_T_62; // @[TLB.scala:174:{86,95,105}] wire sector_hits_7 = _sector_hits_T_58 & _sector_hits_T_63; // @[package.scala:81:59] wire _superpage_hits_tagMatch_T = ~superpage_entries_0_tag_v; // @[TLB.scala:178:43, :341:30] wire superpage_hits_tagMatch = superpage_entries_0_valid_0 & _superpage_hits_tagMatch_T; // @[TLB.scala:178:{33,43}, :341:30] wire [35:0] _T_3451 = superpage_entries_0_tag_vpn ^ vpn; // @[TLB.scala:183:52, :335:30, :341:30] wire [35:0] _superpage_hits_T; // @[TLB.scala:183:52] assign _superpage_hits_T = _T_3451; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_5; // @[TLB.scala:183:52] assign _superpage_hits_T_5 = _T_3451; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_10; // @[TLB.scala:183:52] assign _superpage_hits_T_10 = _T_3451; // @[TLB.scala:183:52] wire [35:0] 
_superpage_hits_T_15; // @[TLB.scala:183:52] assign _superpage_hits_T_15 = _T_3451; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_48; // @[TLB.scala:183:52] assign _hitsVec_T_48 = _T_3451; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_53; // @[TLB.scala:183:52] assign _hitsVec_T_53 = _T_3451; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_58; // @[TLB.scala:183:52] assign _hitsVec_T_58 = _T_3451; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_63; // @[TLB.scala:183:52] assign _hitsVec_T_63 = _T_3451; // @[TLB.scala:183:52] wire [8:0] _superpage_hits_T_1 = _superpage_hits_T[35:27]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_2 = _superpage_hits_T_1 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_3 = _superpage_hits_T_2; // @[TLB.scala:183:{40,79}] wire _superpage_hits_T_4 = superpage_hits_tagMatch & _superpage_hits_T_3; // @[TLB.scala:178:33, :183:{29,40}] wire _GEN_13 = superpage_entries_0_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_1; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_1 = _GEN_13; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_1; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_1 = _GEN_13; // @[TLB.scala:182:28] wire _ppn_ignore_T; // @[TLB.scala:197:28] assign _ppn_ignore_T = _GEN_13; // @[TLB.scala:182:28, :197:28] wire _ignore_T_1; // @[TLB.scala:182:28] assign _ignore_T_1 = _GEN_13; // @[TLB.scala:182:28] wire superpage_hits_ignore_1 = _superpage_hits_ignore_T_1; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_6 = _superpage_hits_T_5[26:18]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_7 = _superpage_hits_T_6 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_8 = superpage_hits_ignore_1 | _superpage_hits_T_7; // @[TLB.scala:182:34, :183:{40,79}] wire _superpage_hits_T_9 = _superpage_hits_T_4 & _superpage_hits_T_8; // @[TLB.scala:183:{29,40}] wire _superpage_hits_ignore_T_2 = ~(superpage_entries_0_level[1]); // @[TLB.scala:182:28, :341:30] wire superpage_hits_ignore_2 = _superpage_hits_ignore_T_2; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_11 = _superpage_hits_T_10[17:9]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_12 = _superpage_hits_T_11 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_13 = superpage_hits_ignore_2 | _superpage_hits_T_12; // @[TLB.scala:182:34, :183:{40,79}] wire _superpage_hits_T_14 = _superpage_hits_T_9 & _superpage_hits_T_13; // @[TLB.scala:183:{29,40}] wire superpage_hits_0 = _superpage_hits_T_14; // @[TLB.scala:183:29] wire _GEN_14 = superpage_entries_0_level != 2'h3; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_3; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_3 = _GEN_14; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_3; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_3 = _GEN_14; // @[TLB.scala:182:28] wire _ppn_ignore_T_2; // @[TLB.scala:197:28] assign _ppn_ignore_T_2 = _GEN_14; // @[TLB.scala:182:28, :197:28] wire _ignore_T_3; // @[TLB.scala:182:28] assign _ignore_T_3 = _GEN_14; // @[TLB.scala:182:28] wire [8:0] _superpage_hits_T_16 = _superpage_hits_T_15[8:0]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_17 = _superpage_hits_T_16 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_tagMatch_T_1 = ~superpage_entries_1_tag_v; // @[TLB.scala:178:43, :341:30] wire superpage_hits_tagMatch_1 = superpage_entries_1_valid_0 & _superpage_hits_tagMatch_T_1; // @[TLB.scala:178:{33,43}, :341:30] wire [35:0] _T_3554 = superpage_entries_1_tag_vpn ^ vpn; // @[TLB.scala:183:52, :335:30, :341:30] wire [35:0] 
_superpage_hits_T_19; // @[TLB.scala:183:52] assign _superpage_hits_T_19 = _T_3554; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_24; // @[TLB.scala:183:52] assign _superpage_hits_T_24 = _T_3554; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_29; // @[TLB.scala:183:52] assign _superpage_hits_T_29 = _T_3554; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_34; // @[TLB.scala:183:52] assign _superpage_hits_T_34 = _T_3554; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_68; // @[TLB.scala:183:52] assign _hitsVec_T_68 = _T_3554; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_73; // @[TLB.scala:183:52] assign _hitsVec_T_73 = _T_3554; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_78; // @[TLB.scala:183:52] assign _hitsVec_T_78 = _T_3554; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_83; // @[TLB.scala:183:52] assign _hitsVec_T_83 = _T_3554; // @[TLB.scala:183:52] wire [8:0] _superpage_hits_T_20 = _superpage_hits_T_19[35:27]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_21 = _superpage_hits_T_20 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_22 = _superpage_hits_T_21; // @[TLB.scala:183:{40,79}] wire _superpage_hits_T_23 = superpage_hits_tagMatch_1 & _superpage_hits_T_22; // @[TLB.scala:178:33, :183:{29,40}] wire _GEN_15 = superpage_entries_1_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_5; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_5 = _GEN_15; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_5; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_5 = _GEN_15; // @[TLB.scala:182:28] wire _ppn_ignore_T_3; // @[TLB.scala:197:28] assign _ppn_ignore_T_3 = _GEN_15; // @[TLB.scala:182:28, :197:28] wire _ignore_T_5; // @[TLB.scala:182:28] assign _ignore_T_5 = _GEN_15; // @[TLB.scala:182:28] wire superpage_hits_ignore_5 = _superpage_hits_ignore_T_5; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_25 = _superpage_hits_T_24[26:18]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_26 = _superpage_hits_T_25 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_27 = superpage_hits_ignore_5 | _superpage_hits_T_26; // @[TLB.scala:182:34, :183:{40,79}] wire _superpage_hits_T_28 = _superpage_hits_T_23 & _superpage_hits_T_27; // @[TLB.scala:183:{29,40}] wire _superpage_hits_ignore_T_6 = ~(superpage_entries_1_level[1]); // @[TLB.scala:182:28, :341:30] wire superpage_hits_ignore_6 = _superpage_hits_ignore_T_6; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_30 = _superpage_hits_T_29[17:9]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_31 = _superpage_hits_T_30 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_32 = superpage_hits_ignore_6 | _superpage_hits_T_31; // @[TLB.scala:182:34, :183:{40,79}] wire _superpage_hits_T_33 = _superpage_hits_T_28 & _superpage_hits_T_32; // @[TLB.scala:183:{29,40}] wire superpage_hits_1 = _superpage_hits_T_33; // @[TLB.scala:183:29] wire _GEN_16 = superpage_entries_1_level != 2'h3; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_7; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_7 = _GEN_16; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_7; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_7 = _GEN_16; // @[TLB.scala:182:28] wire _ppn_ignore_T_5; // @[TLB.scala:197:28] assign _ppn_ignore_T_5 = _GEN_16; // @[TLB.scala:182:28, :197:28] wire _ignore_T_7; // @[TLB.scala:182:28] assign _ignore_T_7 = _GEN_16; // @[TLB.scala:182:28] wire [8:0] _superpage_hits_T_35 = _superpage_hits_T_34[8:0]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_36 = 
_superpage_hits_T_35 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_tagMatch_T_2 = ~superpage_entries_2_tag_v; // @[TLB.scala:178:43, :341:30] wire superpage_hits_tagMatch_2 = superpage_entries_2_valid_0 & _superpage_hits_tagMatch_T_2; // @[TLB.scala:178:{33,43}, :341:30] wire [35:0] _T_3657 = superpage_entries_2_tag_vpn ^ vpn; // @[TLB.scala:183:52, :335:30, :341:30] wire [35:0] _superpage_hits_T_38; // @[TLB.scala:183:52] assign _superpage_hits_T_38 = _T_3657; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_43; // @[TLB.scala:183:52] assign _superpage_hits_T_43 = _T_3657; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_48; // @[TLB.scala:183:52] assign _superpage_hits_T_48 = _T_3657; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_53; // @[TLB.scala:183:52] assign _superpage_hits_T_53 = _T_3657; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_88; // @[TLB.scala:183:52] assign _hitsVec_T_88 = _T_3657; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_93; // @[TLB.scala:183:52] assign _hitsVec_T_93 = _T_3657; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_98; // @[TLB.scala:183:52] assign _hitsVec_T_98 = _T_3657; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_103; // @[TLB.scala:183:52] assign _hitsVec_T_103 = _T_3657; // @[TLB.scala:183:52] wire [8:0] _superpage_hits_T_39 = _superpage_hits_T_38[35:27]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_40 = _superpage_hits_T_39 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_41 = _superpage_hits_T_40; // @[TLB.scala:183:{40,79}] wire _superpage_hits_T_42 = superpage_hits_tagMatch_2 & _superpage_hits_T_41; // @[TLB.scala:178:33, :183:{29,40}] wire _GEN_17 = superpage_entries_2_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_9; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_9 = _GEN_17; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_9; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_9 = _GEN_17; // @[TLB.scala:182:28] wire _ppn_ignore_T_6; // @[TLB.scala:197:28] assign _ppn_ignore_T_6 = _GEN_17; // @[TLB.scala:182:28, :197:28] wire _ignore_T_9; // @[TLB.scala:182:28] assign _ignore_T_9 = _GEN_17; // @[TLB.scala:182:28] wire superpage_hits_ignore_9 = _superpage_hits_ignore_T_9; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_44 = _superpage_hits_T_43[26:18]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_45 = _superpage_hits_T_44 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_46 = superpage_hits_ignore_9 | _superpage_hits_T_45; // @[TLB.scala:182:34, :183:{40,79}] wire _superpage_hits_T_47 = _superpage_hits_T_42 & _superpage_hits_T_46; // @[TLB.scala:183:{29,40}] wire _superpage_hits_ignore_T_10 = ~(superpage_entries_2_level[1]); // @[TLB.scala:182:28, :341:30] wire superpage_hits_ignore_10 = _superpage_hits_ignore_T_10; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_49 = _superpage_hits_T_48[17:9]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_50 = _superpage_hits_T_49 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_51 = superpage_hits_ignore_10 | _superpage_hits_T_50; // @[TLB.scala:182:34, :183:{40,79}] wire _superpage_hits_T_52 = _superpage_hits_T_47 & _superpage_hits_T_51; // @[TLB.scala:183:{29,40}] wire superpage_hits_2 = _superpage_hits_T_52; // @[TLB.scala:183:29] wire _GEN_18 = superpage_entries_2_level != 2'h3; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_11; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_11 = _GEN_18; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_11; // 
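// For superpage comparisons, each 9-bit VPN slice check is qualified by an "ignore" term derived
// from the entry's level (TLB.scala:182), so only the VPN bits above the mapped page size must
// match the stored tag.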
@[TLB.scala:182:28] assign _hitsVec_ignore_T_11 = _GEN_18; // @[TLB.scala:182:28] wire _ppn_ignore_T_8; // @[TLB.scala:197:28] assign _ppn_ignore_T_8 = _GEN_18; // @[TLB.scala:182:28, :197:28] wire _ignore_T_11; // @[TLB.scala:182:28] assign _ignore_T_11 = _GEN_18; // @[TLB.scala:182:28] wire [8:0] _superpage_hits_T_54 = _superpage_hits_T_53[8:0]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_55 = _superpage_hits_T_54 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_tagMatch_T_3 = ~superpage_entries_3_tag_v; // @[TLB.scala:178:43, :341:30] wire superpage_hits_tagMatch_3 = superpage_entries_3_valid_0 & _superpage_hits_tagMatch_T_3; // @[TLB.scala:178:{33,43}, :341:30] wire [35:0] _T_3760 = superpage_entries_3_tag_vpn ^ vpn; // @[TLB.scala:183:52, :335:30, :341:30] wire [35:0] _superpage_hits_T_57; // @[TLB.scala:183:52] assign _superpage_hits_T_57 = _T_3760; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_62; // @[TLB.scala:183:52] assign _superpage_hits_T_62 = _T_3760; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_67; // @[TLB.scala:183:52] assign _superpage_hits_T_67 = _T_3760; // @[TLB.scala:183:52] wire [35:0] _superpage_hits_T_72; // @[TLB.scala:183:52] assign _superpage_hits_T_72 = _T_3760; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_108; // @[TLB.scala:183:52] assign _hitsVec_T_108 = _T_3760; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_113; // @[TLB.scala:183:52] assign _hitsVec_T_113 = _T_3760; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_118; // @[TLB.scala:183:52] assign _hitsVec_T_118 = _T_3760; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_123; // @[TLB.scala:183:52] assign _hitsVec_T_123 = _T_3760; // @[TLB.scala:183:52] wire [8:0] _superpage_hits_T_58 = _superpage_hits_T_57[35:27]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_59 = _superpage_hits_T_58 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_60 = _superpage_hits_T_59; // @[TLB.scala:183:{40,79}] wire _superpage_hits_T_61 = superpage_hits_tagMatch_3 & _superpage_hits_T_60; // @[TLB.scala:178:33, :183:{29,40}] wire _GEN_19 = superpage_entries_3_level == 2'h0; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_13; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_13 = _GEN_19; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_13; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_13 = _GEN_19; // @[TLB.scala:182:28] wire _ppn_ignore_T_9; // @[TLB.scala:197:28] assign _ppn_ignore_T_9 = _GEN_19; // @[TLB.scala:182:28, :197:28] wire _ignore_T_13; // @[TLB.scala:182:28] assign _ignore_T_13 = _GEN_19; // @[TLB.scala:182:28] wire superpage_hits_ignore_13 = _superpage_hits_ignore_T_13; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_63 = _superpage_hits_T_62[26:18]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_64 = _superpage_hits_T_63 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_65 = superpage_hits_ignore_13 | _superpage_hits_T_64; // @[TLB.scala:182:34, :183:{40,79}] wire _superpage_hits_T_66 = _superpage_hits_T_61 & _superpage_hits_T_65; // @[TLB.scala:183:{29,40}] wire _superpage_hits_ignore_T_14 = ~(superpage_entries_3_level[1]); // @[TLB.scala:182:28, :341:30] wire superpage_hits_ignore_14 = _superpage_hits_ignore_T_14; // @[TLB.scala:182:{28,34}] wire [8:0] _superpage_hits_T_68 = _superpage_hits_T_67[17:9]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_69 = _superpage_hits_T_68 == 9'h0; // @[TLB.scala:183:{58,79}] wire _superpage_hits_T_70 = superpage_hits_ignore_14 | _superpage_hits_T_69; // @[TLB.scala:182:34, :183:{40,79}] wire 
_superpage_hits_T_71 = _superpage_hits_T_66 & _superpage_hits_T_70; // @[TLB.scala:183:{29,40}] wire superpage_hits_3 = _superpage_hits_T_71; // @[TLB.scala:183:29] wire _GEN_20 = superpage_entries_3_level != 2'h3; // @[TLB.scala:182:28, :341:30] wire _superpage_hits_ignore_T_15; // @[TLB.scala:182:28] assign _superpage_hits_ignore_T_15 = _GEN_20; // @[TLB.scala:182:28] wire _hitsVec_ignore_T_15; // @[TLB.scala:182:28] assign _hitsVec_ignore_T_15 = _GEN_20; // @[TLB.scala:182:28] wire _ppn_ignore_T_11; // @[TLB.scala:197:28] assign _ppn_ignore_T_11 = _GEN_20; // @[TLB.scala:182:28, :197:28] wire _ignore_T_15; // @[TLB.scala:182:28] assign _ignore_T_15 = _GEN_20; // @[TLB.scala:182:28] wire [8:0] _superpage_hits_T_73 = _superpage_hits_T_72[8:0]; // @[TLB.scala:183:{52,58}] wire _superpage_hits_T_74 = _superpage_hits_T_73 == 9'h0; // @[TLB.scala:183:{58,79}] wire [1:0] hitsVec_idx = vpn[1:0]; // @[package.scala:163:13] wire [1:0] hitsVec_idx_1 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] hitsVec_idx_2 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] hitsVec_idx_3 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] hitsVec_idx_4 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] hitsVec_idx_5 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] hitsVec_idx_6 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] hitsVec_idx_7 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] _entries_T = vpn[1:0]; // @[package.scala:163:13] wire [1:0] _entries_T_24 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] _entries_T_48 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] _entries_T_72 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] _entries_T_96 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] _entries_T_120 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] _entries_T_144 = vpn[1:0]; // @[package.scala:163:13] wire [1:0] _entries_T_168 = vpn[1:0]; // @[package.scala:163:13] wire [33:0] _hitsVec_T_1 = _hitsVec_T[35:2]; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_2 = _hitsVec_T_1 == 34'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_3 = ~sectored_entries_0_0_tag_v; // @[TLB.scala:174:105, :339:29] wire _hitsVec_T_4 = _hitsVec_T_2 & _hitsVec_T_3; // @[TLB.scala:174:{86,95,105}] wire [3:0] _GEN_21 = {{sectored_entries_0_0_valid_3}, {sectored_entries_0_0_valid_2}, {sectored_entries_0_0_valid_1}, {sectored_entries_0_0_valid_0}}; // @[TLB.scala:188:18, :339:29] wire _hitsVec_T_5 = _GEN_21[hitsVec_idx] & _hitsVec_T_4; // @[package.scala:163:13] wire hitsVec_0 = vm_enabled & _hitsVec_T_5; // @[TLB.scala:188:18, :399:61, :440:44] wire [33:0] _hitsVec_T_7 = _hitsVec_T_6[35:2]; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_8 = _hitsVec_T_7 == 34'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_9 = ~sectored_entries_0_1_tag_v; // @[TLB.scala:174:105, :339:29] wire _hitsVec_T_10 = _hitsVec_T_8 & _hitsVec_T_9; // @[TLB.scala:174:{86,95,105}] wire [3:0] _GEN_22 = {{sectored_entries_0_1_valid_3}, {sectored_entries_0_1_valid_2}, {sectored_entries_0_1_valid_1}, {sectored_entries_0_1_valid_0}}; // @[TLB.scala:188:18, :339:29] wire _hitsVec_T_11 = _GEN_22[hitsVec_idx_1] & _hitsVec_T_10; // @[package.scala:163:13] wire hitsVec_1 = vm_enabled & _hitsVec_T_11; // @[TLB.scala:188:18, :399:61, :440:44] wire [33:0] _hitsVec_T_13 = _hitsVec_T_12[35:2]; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_14 = _hitsVec_T_13 == 34'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_15 = ~sectored_entries_0_2_tag_v; // @[TLB.scala:174:105, :339:29] wire _hitsVec_T_16 = _hitsVec_T_14 & _hitsVec_T_15; // @[TLB.scala:174:{86,95,105}] wire [3:0] _GEN_23 = 
{{sectored_entries_0_2_valid_3}, {sectored_entries_0_2_valid_2}, {sectored_entries_0_2_valid_1}, {sectored_entries_0_2_valid_0}}; // @[TLB.scala:188:18, :339:29] wire _hitsVec_T_17 = _GEN_23[hitsVec_idx_2] & _hitsVec_T_16; // @[package.scala:163:13] wire hitsVec_2 = vm_enabled & _hitsVec_T_17; // @[TLB.scala:188:18, :399:61, :440:44] wire [33:0] _hitsVec_T_19 = _hitsVec_T_18[35:2]; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_20 = _hitsVec_T_19 == 34'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_21 = ~sectored_entries_0_3_tag_v; // @[TLB.scala:174:105, :339:29] wire _hitsVec_T_22 = _hitsVec_T_20 & _hitsVec_T_21; // @[TLB.scala:174:{86,95,105}] wire [3:0] _GEN_24 = {{sectored_entries_0_3_valid_3}, {sectored_entries_0_3_valid_2}, {sectored_entries_0_3_valid_1}, {sectored_entries_0_3_valid_0}}; // @[TLB.scala:188:18, :339:29] wire _hitsVec_T_23 = _GEN_24[hitsVec_idx_3] & _hitsVec_T_22; // @[package.scala:163:13] wire hitsVec_3 = vm_enabled & _hitsVec_T_23; // @[TLB.scala:188:18, :399:61, :440:44] wire [33:0] _hitsVec_T_25 = _hitsVec_T_24[35:2]; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_26 = _hitsVec_T_25 == 34'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_27 = ~sectored_entries_0_4_tag_v; // @[TLB.scala:174:105, :339:29] wire _hitsVec_T_28 = _hitsVec_T_26 & _hitsVec_T_27; // @[TLB.scala:174:{86,95,105}] wire [3:0] _GEN_25 = {{sectored_entries_0_4_valid_3}, {sectored_entries_0_4_valid_2}, {sectored_entries_0_4_valid_1}, {sectored_entries_0_4_valid_0}}; // @[TLB.scala:188:18, :339:29] wire _hitsVec_T_29 = _GEN_25[hitsVec_idx_4] & _hitsVec_T_28; // @[package.scala:163:13] wire hitsVec_4 = vm_enabled & _hitsVec_T_29; // @[TLB.scala:188:18, :399:61, :440:44] wire [33:0] _hitsVec_T_31 = _hitsVec_T_30[35:2]; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_32 = _hitsVec_T_31 == 34'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_33 = ~sectored_entries_0_5_tag_v; // @[TLB.scala:174:105, :339:29] wire _hitsVec_T_34 = _hitsVec_T_32 & _hitsVec_T_33; // @[TLB.scala:174:{86,95,105}] wire [3:0] _GEN_26 = {{sectored_entries_0_5_valid_3}, {sectored_entries_0_5_valid_2}, {sectored_entries_0_5_valid_1}, {sectored_entries_0_5_valid_0}}; // @[TLB.scala:188:18, :339:29] wire _hitsVec_T_35 = _GEN_26[hitsVec_idx_5] & _hitsVec_T_34; // @[package.scala:163:13] wire hitsVec_5 = vm_enabled & _hitsVec_T_35; // @[TLB.scala:188:18, :399:61, :440:44] wire [33:0] _hitsVec_T_37 = _hitsVec_T_36[35:2]; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_38 = _hitsVec_T_37 == 34'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_39 = ~sectored_entries_0_6_tag_v; // @[TLB.scala:174:105, :339:29] wire _hitsVec_T_40 = _hitsVec_T_38 & _hitsVec_T_39; // @[TLB.scala:174:{86,95,105}] wire [3:0] _GEN_27 = {{sectored_entries_0_6_valid_3}, {sectored_entries_0_6_valid_2}, {sectored_entries_0_6_valid_1}, {sectored_entries_0_6_valid_0}}; // @[TLB.scala:188:18, :339:29] wire _hitsVec_T_41 = _GEN_27[hitsVec_idx_6] & _hitsVec_T_40; // @[package.scala:163:13] wire hitsVec_6 = vm_enabled & _hitsVec_T_41; // @[TLB.scala:188:18, :399:61, :440:44] wire [33:0] _hitsVec_T_43 = _hitsVec_T_42[35:2]; // @[TLB.scala:174:{61,68}] wire _hitsVec_T_44 = _hitsVec_T_43 == 34'h0; // @[TLB.scala:174:{68,86}] wire _hitsVec_T_45 = ~sectored_entries_0_7_tag_v; // @[TLB.scala:174:105, :339:29] wire _hitsVec_T_46 = _hitsVec_T_44 & _hitsVec_T_45; // @[TLB.scala:174:{86,95,105}] wire [3:0] _GEN_28 = {{sectored_entries_0_7_valid_3}, {sectored_entries_0_7_valid_2}, {sectored_entries_0_7_valid_1}, {sectored_entries_0_7_valid_0}}; // @[TLB.scala:188:18, :339:29] wire _hitsVec_T_47 = 
_GEN_28[hitsVec_idx_7] & _hitsVec_T_46; // @[package.scala:163:13] wire hitsVec_7 = vm_enabled & _hitsVec_T_47; // @[TLB.scala:188:18, :399:61, :440:44] wire _hitsVec_tagMatch_T = ~superpage_entries_0_tag_v; // @[TLB.scala:178:43, :341:30] wire hitsVec_tagMatch = superpage_entries_0_valid_0 & _hitsVec_tagMatch_T; // @[TLB.scala:178:{33,43}, :341:30] wire [8:0] _hitsVec_T_49 = _hitsVec_T_48[35:27]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_50 = _hitsVec_T_49 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_51 = _hitsVec_T_50; // @[TLB.scala:183:{40,79}] wire _hitsVec_T_52 = hitsVec_tagMatch & _hitsVec_T_51; // @[TLB.scala:178:33, :183:{29,40}] wire hitsVec_ignore_1 = _hitsVec_ignore_T_1; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_54 = _hitsVec_T_53[26:18]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_55 = _hitsVec_T_54 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_56 = hitsVec_ignore_1 | _hitsVec_T_55; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_57 = _hitsVec_T_52 & _hitsVec_T_56; // @[TLB.scala:183:{29,40}] wire _hitsVec_ignore_T_2 = ~(superpage_entries_0_level[1]); // @[TLB.scala:182:28, :341:30] wire hitsVec_ignore_2 = _hitsVec_ignore_T_2; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_59 = _hitsVec_T_58[17:9]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_60 = _hitsVec_T_59 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_61 = hitsVec_ignore_2 | _hitsVec_T_60; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_62 = _hitsVec_T_57 & _hitsVec_T_61; // @[TLB.scala:183:{29,40}] wire _hitsVec_T_67 = _hitsVec_T_62; // @[TLB.scala:183:29] wire [8:0] _hitsVec_T_64 = _hitsVec_T_63[8:0]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_65 = _hitsVec_T_64 == 9'h0; // @[TLB.scala:183:{58,79}] wire hitsVec_8 = vm_enabled & _hitsVec_T_67; // @[TLB.scala:183:29, :399:61, :440:44] wire _hitsVec_tagMatch_T_1 = ~superpage_entries_1_tag_v; // @[TLB.scala:178:43, :341:30] wire hitsVec_tagMatch_1 = superpage_entries_1_valid_0 & _hitsVec_tagMatch_T_1; // @[TLB.scala:178:{33,43}, :341:30] wire [8:0] _hitsVec_T_69 = _hitsVec_T_68[35:27]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_70 = _hitsVec_T_69 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_71 = _hitsVec_T_70; // @[TLB.scala:183:{40,79}] wire _hitsVec_T_72 = hitsVec_tagMatch_1 & _hitsVec_T_71; // @[TLB.scala:178:33, :183:{29,40}] wire hitsVec_ignore_5 = _hitsVec_ignore_T_5; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_74 = _hitsVec_T_73[26:18]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_75 = _hitsVec_T_74 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_76 = hitsVec_ignore_5 | _hitsVec_T_75; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_77 = _hitsVec_T_72 & _hitsVec_T_76; // @[TLB.scala:183:{29,40}] wire _hitsVec_ignore_T_6 = ~(superpage_entries_1_level[1]); // @[TLB.scala:182:28, :341:30] wire hitsVec_ignore_6 = _hitsVec_ignore_T_6; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_79 = _hitsVec_T_78[17:9]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_80 = _hitsVec_T_79 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_81 = hitsVec_ignore_6 | _hitsVec_T_80; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_82 = _hitsVec_T_77 & _hitsVec_T_81; // @[TLB.scala:183:{29,40}] wire _hitsVec_T_87 = _hitsVec_T_82; // @[TLB.scala:183:29] wire [8:0] _hitsVec_T_84 = _hitsVec_T_83[8:0]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_85 = _hitsVec_T_84 == 9'h0; // @[TLB.scala:183:{58,79}] wire hitsVec_9 = vm_enabled & _hitsVec_T_87; // @[TLB.scala:183:29, :399:61, :440:44] wire _hitsVec_tagMatch_T_2 = 
~superpage_entries_2_tag_v; // @[TLB.scala:178:43, :341:30] wire hitsVec_tagMatch_2 = superpage_entries_2_valid_0 & _hitsVec_tagMatch_T_2; // @[TLB.scala:178:{33,43}, :341:30] wire [8:0] _hitsVec_T_89 = _hitsVec_T_88[35:27]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_90 = _hitsVec_T_89 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_91 = _hitsVec_T_90; // @[TLB.scala:183:{40,79}] wire _hitsVec_T_92 = hitsVec_tagMatch_2 & _hitsVec_T_91; // @[TLB.scala:178:33, :183:{29,40}] wire hitsVec_ignore_9 = _hitsVec_ignore_T_9; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_94 = _hitsVec_T_93[26:18]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_95 = _hitsVec_T_94 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_96 = hitsVec_ignore_9 | _hitsVec_T_95; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_97 = _hitsVec_T_92 & _hitsVec_T_96; // @[TLB.scala:183:{29,40}] wire _hitsVec_ignore_T_10 = ~(superpage_entries_2_level[1]); // @[TLB.scala:182:28, :341:30] wire hitsVec_ignore_10 = _hitsVec_ignore_T_10; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_99 = _hitsVec_T_98[17:9]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_100 = _hitsVec_T_99 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_101 = hitsVec_ignore_10 | _hitsVec_T_100; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_102 = _hitsVec_T_97 & _hitsVec_T_101; // @[TLB.scala:183:{29,40}] wire _hitsVec_T_107 = _hitsVec_T_102; // @[TLB.scala:183:29] wire [8:0] _hitsVec_T_104 = _hitsVec_T_103[8:0]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_105 = _hitsVec_T_104 == 9'h0; // @[TLB.scala:183:{58,79}] wire hitsVec_10 = vm_enabled & _hitsVec_T_107; // @[TLB.scala:183:29, :399:61, :440:44] wire _hitsVec_tagMatch_T_3 = ~superpage_entries_3_tag_v; // @[TLB.scala:178:43, :341:30] wire hitsVec_tagMatch_3 = superpage_entries_3_valid_0 & _hitsVec_tagMatch_T_3; // @[TLB.scala:178:{33,43}, :341:30] wire [8:0] _hitsVec_T_109 = _hitsVec_T_108[35:27]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_110 = _hitsVec_T_109 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_111 = _hitsVec_T_110; // @[TLB.scala:183:{40,79}] wire _hitsVec_T_112 = hitsVec_tagMatch_3 & _hitsVec_T_111; // @[TLB.scala:178:33, :183:{29,40}] wire hitsVec_ignore_13 = _hitsVec_ignore_T_13; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_114 = _hitsVec_T_113[26:18]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_115 = _hitsVec_T_114 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_116 = hitsVec_ignore_13 | _hitsVec_T_115; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_117 = _hitsVec_T_112 & _hitsVec_T_116; // @[TLB.scala:183:{29,40}] wire _hitsVec_ignore_T_14 = ~(superpage_entries_3_level[1]); // @[TLB.scala:182:28, :341:30] wire hitsVec_ignore_14 = _hitsVec_ignore_T_14; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_119 = _hitsVec_T_118[17:9]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_120 = _hitsVec_T_119 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_121 = hitsVec_ignore_14 | _hitsVec_T_120; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_122 = _hitsVec_T_117 & _hitsVec_T_121; // @[TLB.scala:183:{29,40}] wire _hitsVec_T_127 = _hitsVec_T_122; // @[TLB.scala:183:29] wire [8:0] _hitsVec_T_124 = _hitsVec_T_123[8:0]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_125 = _hitsVec_T_124 == 9'h0; // @[TLB.scala:183:{58,79}] wire hitsVec_11 = vm_enabled & _hitsVec_T_127; // @[TLB.scala:183:29, :399:61, :440:44] wire _hitsVec_tagMatch_T_4 = ~special_entry_tag_v; // @[TLB.scala:178:43, :346:56] wire hitsVec_tagMatch_4 = special_entry_valid_0 & _hitsVec_tagMatch_T_4; // 
@[TLB.scala:178:{33,43}, :346:56] wire [35:0] _T_3863 = special_entry_tag_vpn ^ vpn; // @[TLB.scala:183:52, :335:30, :346:56] wire [35:0] _hitsVec_T_128; // @[TLB.scala:183:52] assign _hitsVec_T_128 = _T_3863; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_133; // @[TLB.scala:183:52] assign _hitsVec_T_133 = _T_3863; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_138; // @[TLB.scala:183:52] assign _hitsVec_T_138 = _T_3863; // @[TLB.scala:183:52] wire [35:0] _hitsVec_T_143; // @[TLB.scala:183:52] assign _hitsVec_T_143 = _T_3863; // @[TLB.scala:183:52] wire [8:0] _hitsVec_T_129 = _hitsVec_T_128[35:27]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_130 = _hitsVec_T_129 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_131 = _hitsVec_T_130; // @[TLB.scala:183:{40,79}] wire _hitsVec_T_132 = hitsVec_tagMatch_4 & _hitsVec_T_131; // @[TLB.scala:178:33, :183:{29,40}] wire hitsVec_ignore_17 = _hitsVec_ignore_T_17; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_134 = _hitsVec_T_133[26:18]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_135 = _hitsVec_T_134 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_136 = hitsVec_ignore_17 | _hitsVec_T_135; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_137 = _hitsVec_T_132 & _hitsVec_T_136; // @[TLB.scala:183:{29,40}] wire _hitsVec_ignore_T_18 = ~(special_entry_level[1]); // @[TLB.scala:182:28, :197:28, :346:56] wire hitsVec_ignore_18 = _hitsVec_ignore_T_18; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_139 = _hitsVec_T_138[17:9]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_140 = _hitsVec_T_139 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_141 = hitsVec_ignore_18 | _hitsVec_T_140; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_142 = _hitsVec_T_137 & _hitsVec_T_141; // @[TLB.scala:183:{29,40}] wire hitsVec_ignore_19 = _hitsVec_ignore_T_19; // @[TLB.scala:182:{28,34}] wire [8:0] _hitsVec_T_144 = _hitsVec_T_143[8:0]; // @[TLB.scala:183:{52,58}] wire _hitsVec_T_145 = _hitsVec_T_144 == 9'h0; // @[TLB.scala:183:{58,79}] wire _hitsVec_T_146 = hitsVec_ignore_19 | _hitsVec_T_145; // @[TLB.scala:182:34, :183:{40,79}] wire _hitsVec_T_147 = _hitsVec_T_142 & _hitsVec_T_146; // @[TLB.scala:183:{29,40}] wire hitsVec_12 = vm_enabled & _hitsVec_T_147; // @[TLB.scala:183:29, :399:61, :440:44] wire [1:0] real_hits_lo_lo_hi = {hitsVec_2, hitsVec_1}; // @[package.scala:45:27] wire [2:0] real_hits_lo_lo = {real_hits_lo_lo_hi, hitsVec_0}; // @[package.scala:45:27] wire [1:0] real_hits_lo_hi_hi = {hitsVec_5, hitsVec_4}; // @[package.scala:45:27] wire [2:0] real_hits_lo_hi = {real_hits_lo_hi_hi, hitsVec_3}; // @[package.scala:45:27] wire [5:0] real_hits_lo = {real_hits_lo_hi, real_hits_lo_lo}; // @[package.scala:45:27] wire [1:0] real_hits_hi_lo_hi = {hitsVec_8, hitsVec_7}; // @[package.scala:45:27] wire [2:0] real_hits_hi_lo = {real_hits_hi_lo_hi, hitsVec_6}; // @[package.scala:45:27] wire [1:0] real_hits_hi_hi_lo = {hitsVec_10, hitsVec_9}; // @[package.scala:45:27] wire [1:0] real_hits_hi_hi_hi = {hitsVec_12, hitsVec_11}; // @[package.scala:45:27] wire [3:0] real_hits_hi_hi = {real_hits_hi_hi_hi, real_hits_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] real_hits_hi = {real_hits_hi_hi, real_hits_hi_lo}; // @[package.scala:45:27] wire [12:0] real_hits = {real_hits_hi, real_hits_lo}; // @[package.scala:45:27] wire [12:0] _tlb_hit_T = real_hits; // @[package.scala:45:27] wire _hits_T = ~vm_enabled; // @[TLB.scala:399:61, :442:18] wire [13:0] hits = {_hits_T, real_hits}; // @[package.scala:45:27] wire _newEntry_g_T; // @[TLB.scala:453:25] wire _newEntry_sw_T_6; 
// @[PTW.scala:151:40] wire _newEntry_sx_T_5; // @[PTW.scala:153:35] wire _newEntry_sr_T_5; // @[PTW.scala:149:35] wire newEntry_g; // @[TLB.scala:449:24] wire newEntry_sw; // @[TLB.scala:449:24] wire newEntry_sx; // @[TLB.scala:449:24] wire newEntry_sr; // @[TLB.scala:449:24] wire newEntry_ppp; // @[TLB.scala:449:24] wire newEntry_pal; // @[TLB.scala:449:24] wire newEntry_paa; // @[TLB.scala:449:24] wire newEntry_eff; // @[TLB.scala:449:24] assign _newEntry_g_T = io_ptw_resp_bits_pte_g_0 & io_ptw_resp_bits_pte_v_0; // @[TLB.scala:318:7, :453:25] assign newEntry_g = _newEntry_g_T; // @[TLB.scala:449:24, :453:25] wire _newEntry_ae_stage2_T = io_ptw_resp_bits_ae_final_0 & io_ptw_resp_bits_gpa_is_pte_0; // @[TLB.scala:318:7, :456:53] wire _newEntry_sr_T = ~io_ptw_resp_bits_pte_w_0; // @[TLB.scala:318:7] wire _newEntry_sr_T_1 = io_ptw_resp_bits_pte_x_0 & _newEntry_sr_T; // @[TLB.scala:318:7] wire _newEntry_sr_T_2 = io_ptw_resp_bits_pte_r_0 | _newEntry_sr_T_1; // @[TLB.scala:318:7] wire _newEntry_sr_T_3 = io_ptw_resp_bits_pte_v_0 & _newEntry_sr_T_2; // @[TLB.scala:318:7] wire _newEntry_sr_T_4 = _newEntry_sr_T_3 & io_ptw_resp_bits_pte_a_0; // @[TLB.scala:318:7] assign _newEntry_sr_T_5 = _newEntry_sr_T_4 & io_ptw_resp_bits_pte_r_0; // @[TLB.scala:318:7] assign newEntry_sr = _newEntry_sr_T_5; // @[TLB.scala:449:24] wire _newEntry_sw_T = ~io_ptw_resp_bits_pte_w_0; // @[TLB.scala:318:7] wire _newEntry_sw_T_1 = io_ptw_resp_bits_pte_x_0 & _newEntry_sw_T; // @[TLB.scala:318:7] wire _newEntry_sw_T_2 = io_ptw_resp_bits_pte_r_0 | _newEntry_sw_T_1; // @[TLB.scala:318:7] wire _newEntry_sw_T_3 = io_ptw_resp_bits_pte_v_0 & _newEntry_sw_T_2; // @[TLB.scala:318:7] wire _newEntry_sw_T_4 = _newEntry_sw_T_3 & io_ptw_resp_bits_pte_a_0; // @[TLB.scala:318:7] wire _newEntry_sw_T_5 = _newEntry_sw_T_4 & io_ptw_resp_bits_pte_w_0; // @[TLB.scala:318:7] assign _newEntry_sw_T_6 = _newEntry_sw_T_5 & io_ptw_resp_bits_pte_d_0; // @[TLB.scala:318:7] assign newEntry_sw = _newEntry_sw_T_6; // @[TLB.scala:449:24] wire _newEntry_sx_T = ~io_ptw_resp_bits_pte_w_0; // @[TLB.scala:318:7] wire _newEntry_sx_T_1 = io_ptw_resp_bits_pte_x_0 & _newEntry_sx_T; // @[TLB.scala:318:7] wire _newEntry_sx_T_2 = io_ptw_resp_bits_pte_r_0 | _newEntry_sx_T_1; // @[TLB.scala:318:7] wire _newEntry_sx_T_3 = io_ptw_resp_bits_pte_v_0 & _newEntry_sx_T_2; // @[TLB.scala:318:7] wire _newEntry_sx_T_4 = _newEntry_sx_T_3 & io_ptw_resp_bits_pte_a_0; // @[TLB.scala:318:7] assign _newEntry_sx_T_5 = _newEntry_sx_T_4 & io_ptw_resp_bits_pte_x_0; // @[TLB.scala:318:7] assign newEntry_sx = _newEntry_sx_T_5; // @[TLB.scala:449:24] wire [1:0] _GEN_29 = {newEntry_c, 1'h0}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign special_entry_data_0_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] superpage_entries_1_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign superpage_entries_1_data_0_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] superpage_entries_2_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign superpage_entries_2_data_0_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] superpage_entries_3_data_0_lo_lo_lo; // @[TLB.scala:217:24] assign superpage_entries_3_data_0_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_0_data_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_0_data_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] 
sectored_entries_0_1_data_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_1_data_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_2_data_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_2_data_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_3_data_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_3_data_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_4_data_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_4_data_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_5_data_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_5_data_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_6_data_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_6_data_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_7_data_lo_lo_lo; // @[TLB.scala:217:24] assign sectored_entries_0_7_data_lo_lo_lo = _GEN_29; // @[TLB.scala:217:24] wire [1:0] _GEN_30 = {newEntry_pal, newEntry_paa}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign special_entry_data_0_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] superpage_entries_1_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_1_data_0_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] superpage_entries_2_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_2_data_0_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] superpage_entries_3_data_0_lo_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_3_data_0_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_0_data_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_0_data_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_1_data_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_1_data_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_2_data_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_2_data_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_3_data_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_3_data_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_4_data_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_4_data_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_5_data_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_5_data_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_6_data_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_6_data_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_7_data_lo_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_7_data_lo_lo_hi_hi = _GEN_30; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_lo_lo_hi = {special_entry_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] special_entry_data_0_lo_lo = {special_entry_data_0_lo_lo_hi, special_entry_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [1:0] _GEN_31 = {newEntry_px, newEntry_pr}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign special_entry_data_0_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] 
superpage_entries_0_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] superpage_entries_1_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_1_data_0_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] superpage_entries_2_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_2_data_0_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] superpage_entries_3_data_0_lo_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_3_data_0_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_0_data_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_0_data_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_1_data_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_1_data_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_2_data_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_2_data_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_3_data_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_3_data_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_4_data_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_4_data_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_5_data_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_5_data_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_6_data_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_6_data_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_7_data_lo_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_7_data_lo_hi_lo_hi = _GEN_31; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_lo_hi_lo = {special_entry_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [1:0] _GEN_32 = {newEntry_hx, newEntry_hr}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign special_entry_data_0_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] superpage_entries_1_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_1_data_0_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] superpage_entries_2_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_2_data_0_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] superpage_entries_3_data_0_lo_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_3_data_0_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_0_data_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_0_data_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_1_data_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_1_data_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_2_data_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_2_data_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_3_data_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_3_data_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_4_data_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_4_data_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire 
[1:0] sectored_entries_0_5_data_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_5_data_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_6_data_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_6_data_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_7_data_lo_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_7_data_lo_hi_hi_hi = _GEN_32; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_lo_hi_hi = {special_entry_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] special_entry_data_0_lo_hi = {special_entry_data_0_lo_hi_hi, special_entry_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] special_entry_data_0_lo = {special_entry_data_0_lo_hi, special_entry_data_0_lo_lo}; // @[TLB.scala:217:24] wire [1:0] _GEN_33 = {newEntry_sx, newEntry_sr}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign special_entry_data_0_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] superpage_entries_1_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_1_data_0_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] superpage_entries_2_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_2_data_0_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] superpage_entries_3_data_0_hi_lo_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_3_data_0_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_0_data_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_0_data_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_1_data_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_1_data_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_2_data_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_2_data_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_3_data_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_3_data_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_4_data_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_4_data_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_5_data_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_5_data_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_6_data_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_6_data_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_7_data_hi_lo_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_7_data_hi_lo_lo_hi = _GEN_33; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_hi_lo_lo = {special_entry_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [1:0] _GEN_34 = {newEntry_pf, newEntry_gf}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign special_entry_data_0_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] superpage_entries_1_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_1_data_0_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] 
superpage_entries_2_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_2_data_0_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] superpage_entries_3_data_0_hi_lo_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_3_data_0_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_0_data_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_0_data_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_1_data_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_1_data_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_2_data_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_2_data_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_3_data_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_3_data_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_4_data_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_4_data_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_5_data_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_5_data_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_6_data_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_6_data_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_7_data_hi_lo_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_7_data_hi_lo_hi_hi = _GEN_34; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_hi_lo_hi = {special_entry_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] special_entry_data_0_hi_lo = {special_entry_data_0_hi_lo_hi, special_entry_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [1:0] _GEN_35 = {newEntry_ae_ptw, newEntry_ae_final}; // @[TLB.scala:217:24, :449:24] wire [1:0] special_entry_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign special_entry_data_0_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] superpage_entries_0_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] superpage_entries_1_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_1_data_0_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] superpage_entries_2_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_2_data_0_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] superpage_entries_3_data_0_hi_hi_lo_hi; // @[TLB.scala:217:24] assign superpage_entries_3_data_0_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_0_data_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_0_data_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_1_data_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_1_data_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_2_data_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_2_data_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_3_data_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_3_data_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_4_data_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_4_data_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_5_data_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_5_data_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] 
sectored_entries_0_6_data_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_6_data_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [1:0] sectored_entries_0_7_data_hi_hi_lo_hi; // @[TLB.scala:217:24] assign sectored_entries_0_7_data_hi_hi_lo_hi = _GEN_35; // @[TLB.scala:217:24] wire [2:0] special_entry_data_0_hi_hi_lo = {special_entry_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [20:0] _GEN_36 = {newEntry_ppn, newEntry_u}; // @[TLB.scala:217:24, :449:24] wire [20:0] special_entry_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign special_entry_data_0_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] superpage_entries_0_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_0_data_0_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] superpage_entries_1_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_1_data_0_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] superpage_entries_2_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_2_data_0_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] superpage_entries_3_data_0_hi_hi_hi_hi; // @[TLB.scala:217:24] assign superpage_entries_3_data_0_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_0_data_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_0_data_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_1_data_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_1_data_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_2_data_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_2_data_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_3_data_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_3_data_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_4_data_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_4_data_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_5_data_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_5_data_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_6_data_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_6_data_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [20:0] sectored_entries_0_7_data_hi_hi_hi_hi; // @[TLB.scala:217:24] assign sectored_entries_0_7_data_hi_hi_hi_hi = _GEN_36; // @[TLB.scala:217:24] wire [21:0] special_entry_data_0_hi_hi_hi = {special_entry_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] special_entry_data_0_hi_hi = {special_entry_data_0_hi_hi_hi, special_entry_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] special_entry_data_0_hi = {special_entry_data_0_hi_hi, special_entry_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _special_entry_data_0_T = {special_entry_data_0_hi, special_entry_data_0_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_0_data_0_lo_lo_hi = {superpage_entries_0_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] superpage_entries_0_data_0_lo_lo = {superpage_entries_0_data_0_lo_lo_hi, superpage_entries_0_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_0_data_0_lo_hi_lo = {superpage_entries_0_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_0_data_0_lo_hi_hi = {superpage_entries_0_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_0_data_0_lo_hi = 
{superpage_entries_0_data_0_lo_hi_hi, superpage_entries_0_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] superpage_entries_0_data_0_lo = {superpage_entries_0_data_0_lo_hi, superpage_entries_0_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_0_data_0_hi_lo_lo = {superpage_entries_0_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_0_data_0_hi_lo_hi = {superpage_entries_0_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_0_data_0_hi_lo = {superpage_entries_0_data_0_hi_lo_hi, superpage_entries_0_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_0_data_0_hi_hi_lo = {superpage_entries_0_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] superpage_entries_0_data_0_hi_hi_hi = {superpage_entries_0_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] superpage_entries_0_data_0_hi_hi = {superpage_entries_0_data_0_hi_hi_hi, superpage_entries_0_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] superpage_entries_0_data_0_hi = {superpage_entries_0_data_0_hi_hi, superpage_entries_0_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _superpage_entries_0_data_0_T = {superpage_entries_0_data_0_hi, superpage_entries_0_data_0_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_1_data_0_lo_lo_hi = {superpage_entries_1_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] superpage_entries_1_data_0_lo_lo = {superpage_entries_1_data_0_lo_lo_hi, superpage_entries_1_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_1_data_0_lo_hi_lo = {superpage_entries_1_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_1_data_0_lo_hi_hi = {superpage_entries_1_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_1_data_0_lo_hi = {superpage_entries_1_data_0_lo_hi_hi, superpage_entries_1_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] superpage_entries_1_data_0_lo = {superpage_entries_1_data_0_lo_hi, superpage_entries_1_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_1_data_0_hi_lo_lo = {superpage_entries_1_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_1_data_0_hi_lo_hi = {superpage_entries_1_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_1_data_0_hi_lo = {superpage_entries_1_data_0_hi_lo_hi, superpage_entries_1_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_1_data_0_hi_hi_lo = {superpage_entries_1_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] superpage_entries_1_data_0_hi_hi_hi = {superpage_entries_1_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] superpage_entries_1_data_0_hi_hi = {superpage_entries_1_data_0_hi_hi_hi, superpage_entries_1_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] superpage_entries_1_data_0_hi = {superpage_entries_1_data_0_hi_hi, superpage_entries_1_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _superpage_entries_1_data_0_T = {superpage_entries_1_data_0_hi, superpage_entries_1_data_0_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_2_data_0_lo_lo_hi = {superpage_entries_2_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] superpage_entries_2_data_0_lo_lo = {superpage_entries_2_data_0_lo_lo_hi, superpage_entries_2_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] 
superpage_entries_2_data_0_lo_hi_lo = {superpage_entries_2_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_2_data_0_lo_hi_hi = {superpage_entries_2_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_2_data_0_lo_hi = {superpage_entries_2_data_0_lo_hi_hi, superpage_entries_2_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] superpage_entries_2_data_0_lo = {superpage_entries_2_data_0_lo_hi, superpage_entries_2_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_2_data_0_hi_lo_lo = {superpage_entries_2_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_2_data_0_hi_lo_hi = {superpage_entries_2_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_2_data_0_hi_lo = {superpage_entries_2_data_0_hi_lo_hi, superpage_entries_2_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_2_data_0_hi_hi_lo = {superpage_entries_2_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] superpage_entries_2_data_0_hi_hi_hi = {superpage_entries_2_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] superpage_entries_2_data_0_hi_hi = {superpage_entries_2_data_0_hi_hi_hi, superpage_entries_2_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] superpage_entries_2_data_0_hi = {superpage_entries_2_data_0_hi_hi, superpage_entries_2_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _superpage_entries_2_data_0_T = {superpage_entries_2_data_0_hi, superpage_entries_2_data_0_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_3_data_0_lo_lo_hi = {superpage_entries_3_data_0_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] superpage_entries_3_data_0_lo_lo = {superpage_entries_3_data_0_lo_lo_hi, superpage_entries_3_data_0_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_3_data_0_lo_hi_lo = {superpage_entries_3_data_0_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_3_data_0_lo_hi_hi = {superpage_entries_3_data_0_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_3_data_0_lo_hi = {superpage_entries_3_data_0_lo_hi_hi, superpage_entries_3_data_0_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] superpage_entries_3_data_0_lo = {superpage_entries_3_data_0_lo_hi, superpage_entries_3_data_0_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_3_data_0_hi_lo_lo = {superpage_entries_3_data_0_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] superpage_entries_3_data_0_hi_lo_hi = {superpage_entries_3_data_0_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] superpage_entries_3_data_0_hi_lo = {superpage_entries_3_data_0_hi_lo_hi, superpage_entries_3_data_0_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] superpage_entries_3_data_0_hi_hi_lo = {superpage_entries_3_data_0_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] superpage_entries_3_data_0_hi_hi_hi = {superpage_entries_3_data_0_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] superpage_entries_3_data_0_hi_hi = {superpage_entries_3_data_0_hi_hi_hi, superpage_entries_3_data_0_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] superpage_entries_3_data_0_hi = {superpage_entries_3_data_0_hi_hi, superpage_entries_3_data_0_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _superpage_entries_3_data_0_T = {superpage_entries_3_data_0_hi, superpage_entries_3_data_0_lo}; // @[TLB.scala:217:24] 
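// Added descriptive note (not emitted by the Chisel/FIRRTL toolchain): the statements above pack the refill
// data (newEntry_* permission, attribute, and PPN fields) into one 42-bit word per superpage entry; the
// statements below pick the sectored-entry refill way/index and repeat the same packing for each of the
// eight sectored-entry ways.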
wire [2:0] waddr_1 = r_sectored_hit_valid ? r_sectored_hit_bits : r_sectored_repl_addr; // @[TLB.scala:356:33, :357:27, :485:22] wire [1:0] idx = r_refill_tag[1:0]; // @[package.scala:163:13] wire [1:0] idx_1 = r_refill_tag[1:0]; // @[package.scala:163:13] wire [1:0] idx_2 = r_refill_tag[1:0]; // @[package.scala:163:13] wire [1:0] idx_3 = r_refill_tag[1:0]; // @[package.scala:163:13] wire [1:0] idx_4 = r_refill_tag[1:0]; // @[package.scala:163:13] wire [1:0] idx_5 = r_refill_tag[1:0]; // @[package.scala:163:13] wire [1:0] idx_6 = r_refill_tag[1:0]; // @[package.scala:163:13] wire [1:0] idx_7 = r_refill_tag[1:0]; // @[package.scala:163:13] wire [2:0] sectored_entries_0_0_data_lo_lo_hi = {sectored_entries_0_0_data_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_0_data_lo_lo = {sectored_entries_0_0_data_lo_lo_hi, sectored_entries_0_0_data_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_0_data_lo_hi_lo = {sectored_entries_0_0_data_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_0_data_lo_hi_hi = {sectored_entries_0_0_data_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_0_data_lo_hi = {sectored_entries_0_0_data_lo_hi_hi, sectored_entries_0_0_data_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_0_data_lo = {sectored_entries_0_0_data_lo_hi, sectored_entries_0_0_data_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_0_data_hi_lo_lo = {sectored_entries_0_0_data_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_0_data_hi_lo_hi = {sectored_entries_0_0_data_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_0_data_hi_lo = {sectored_entries_0_0_data_hi_lo_hi, sectored_entries_0_0_data_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_0_data_hi_hi_lo = {sectored_entries_0_0_data_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_0_data_hi_hi_hi = {sectored_entries_0_0_data_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_0_data_hi_hi = {sectored_entries_0_0_data_hi_hi_hi, sectored_entries_0_0_data_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_0_data_hi = {sectored_entries_0_0_data_hi_hi, sectored_entries_0_0_data_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_0_data_T = {sectored_entries_0_0_data_hi, sectored_entries_0_0_data_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_1_data_lo_lo_hi = {sectored_entries_0_1_data_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_1_data_lo_lo = {sectored_entries_0_1_data_lo_lo_hi, sectored_entries_0_1_data_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_1_data_lo_hi_lo = {sectored_entries_0_1_data_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_1_data_lo_hi_hi = {sectored_entries_0_1_data_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_1_data_lo_hi = {sectored_entries_0_1_data_lo_hi_hi, sectored_entries_0_1_data_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_1_data_lo = {sectored_entries_0_1_data_lo_hi, sectored_entries_0_1_data_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_1_data_hi_lo_lo = {sectored_entries_0_1_data_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_1_data_hi_lo_hi = 
{sectored_entries_0_1_data_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_1_data_hi_lo = {sectored_entries_0_1_data_hi_lo_hi, sectored_entries_0_1_data_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_1_data_hi_hi_lo = {sectored_entries_0_1_data_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_1_data_hi_hi_hi = {sectored_entries_0_1_data_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_1_data_hi_hi = {sectored_entries_0_1_data_hi_hi_hi, sectored_entries_0_1_data_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_1_data_hi = {sectored_entries_0_1_data_hi_hi, sectored_entries_0_1_data_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_1_data_T = {sectored_entries_0_1_data_hi, sectored_entries_0_1_data_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_2_data_lo_lo_hi = {sectored_entries_0_2_data_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_2_data_lo_lo = {sectored_entries_0_2_data_lo_lo_hi, sectored_entries_0_2_data_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_2_data_lo_hi_lo = {sectored_entries_0_2_data_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_2_data_lo_hi_hi = {sectored_entries_0_2_data_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_2_data_lo_hi = {sectored_entries_0_2_data_lo_hi_hi, sectored_entries_0_2_data_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_2_data_lo = {sectored_entries_0_2_data_lo_hi, sectored_entries_0_2_data_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_2_data_hi_lo_lo = {sectored_entries_0_2_data_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_2_data_hi_lo_hi = {sectored_entries_0_2_data_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_2_data_hi_lo = {sectored_entries_0_2_data_hi_lo_hi, sectored_entries_0_2_data_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_2_data_hi_hi_lo = {sectored_entries_0_2_data_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_2_data_hi_hi_hi = {sectored_entries_0_2_data_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_2_data_hi_hi = {sectored_entries_0_2_data_hi_hi_hi, sectored_entries_0_2_data_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_2_data_hi = {sectored_entries_0_2_data_hi_hi, sectored_entries_0_2_data_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_2_data_T = {sectored_entries_0_2_data_hi, sectored_entries_0_2_data_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_3_data_lo_lo_hi = {sectored_entries_0_3_data_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_3_data_lo_lo = {sectored_entries_0_3_data_lo_lo_hi, sectored_entries_0_3_data_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_3_data_lo_hi_lo = {sectored_entries_0_3_data_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_3_data_lo_hi_hi = {sectored_entries_0_3_data_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_3_data_lo_hi = {sectored_entries_0_3_data_lo_hi_hi, sectored_entries_0_3_data_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_3_data_lo = {sectored_entries_0_3_data_lo_hi, 
sectored_entries_0_3_data_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_3_data_hi_lo_lo = {sectored_entries_0_3_data_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_3_data_hi_lo_hi = {sectored_entries_0_3_data_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_3_data_hi_lo = {sectored_entries_0_3_data_hi_lo_hi, sectored_entries_0_3_data_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_3_data_hi_hi_lo = {sectored_entries_0_3_data_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_3_data_hi_hi_hi = {sectored_entries_0_3_data_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_3_data_hi_hi = {sectored_entries_0_3_data_hi_hi_hi, sectored_entries_0_3_data_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_3_data_hi = {sectored_entries_0_3_data_hi_hi, sectored_entries_0_3_data_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_3_data_T = {sectored_entries_0_3_data_hi, sectored_entries_0_3_data_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_4_data_lo_lo_hi = {sectored_entries_0_4_data_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_4_data_lo_lo = {sectored_entries_0_4_data_lo_lo_hi, sectored_entries_0_4_data_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_4_data_lo_hi_lo = {sectored_entries_0_4_data_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_4_data_lo_hi_hi = {sectored_entries_0_4_data_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_4_data_lo_hi = {sectored_entries_0_4_data_lo_hi_hi, sectored_entries_0_4_data_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_4_data_lo = {sectored_entries_0_4_data_lo_hi, sectored_entries_0_4_data_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_4_data_hi_lo_lo = {sectored_entries_0_4_data_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_4_data_hi_lo_hi = {sectored_entries_0_4_data_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_4_data_hi_lo = {sectored_entries_0_4_data_hi_lo_hi, sectored_entries_0_4_data_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_4_data_hi_hi_lo = {sectored_entries_0_4_data_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_4_data_hi_hi_hi = {sectored_entries_0_4_data_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_4_data_hi_hi = {sectored_entries_0_4_data_hi_hi_hi, sectored_entries_0_4_data_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_4_data_hi = {sectored_entries_0_4_data_hi_hi, sectored_entries_0_4_data_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_4_data_T = {sectored_entries_0_4_data_hi, sectored_entries_0_4_data_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_5_data_lo_lo_hi = {sectored_entries_0_5_data_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_5_data_lo_lo = {sectored_entries_0_5_data_lo_lo_hi, sectored_entries_0_5_data_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_5_data_lo_hi_lo = {sectored_entries_0_5_data_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_5_data_lo_hi_hi = {sectored_entries_0_5_data_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, 
:449:24] wire [5:0] sectored_entries_0_5_data_lo_hi = {sectored_entries_0_5_data_lo_hi_hi, sectored_entries_0_5_data_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_5_data_lo = {sectored_entries_0_5_data_lo_hi, sectored_entries_0_5_data_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_5_data_hi_lo_lo = {sectored_entries_0_5_data_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_5_data_hi_lo_hi = {sectored_entries_0_5_data_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_5_data_hi_lo = {sectored_entries_0_5_data_hi_lo_hi, sectored_entries_0_5_data_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_5_data_hi_hi_lo = {sectored_entries_0_5_data_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_5_data_hi_hi_hi = {sectored_entries_0_5_data_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_5_data_hi_hi = {sectored_entries_0_5_data_hi_hi_hi, sectored_entries_0_5_data_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_5_data_hi = {sectored_entries_0_5_data_hi_hi, sectored_entries_0_5_data_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_5_data_T = {sectored_entries_0_5_data_hi, sectored_entries_0_5_data_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_6_data_lo_lo_hi = {sectored_entries_0_6_data_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_6_data_lo_lo = {sectored_entries_0_6_data_lo_lo_hi, sectored_entries_0_6_data_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_6_data_lo_hi_lo = {sectored_entries_0_6_data_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_6_data_lo_hi_hi = {sectored_entries_0_6_data_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_6_data_lo_hi = {sectored_entries_0_6_data_lo_hi_hi, sectored_entries_0_6_data_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_6_data_lo = {sectored_entries_0_6_data_lo_hi, sectored_entries_0_6_data_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_6_data_hi_lo_lo = {sectored_entries_0_6_data_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_6_data_hi_lo_hi = {sectored_entries_0_6_data_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_6_data_hi_lo = {sectored_entries_0_6_data_hi_lo_hi, sectored_entries_0_6_data_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_6_data_hi_hi_lo = {sectored_entries_0_6_data_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_6_data_hi_hi_hi = {sectored_entries_0_6_data_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_6_data_hi_hi = {sectored_entries_0_6_data_hi_hi_hi, sectored_entries_0_6_data_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_6_data_hi = {sectored_entries_0_6_data_hi_hi, sectored_entries_0_6_data_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_6_data_T = {sectored_entries_0_6_data_hi, sectored_entries_0_6_data_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_7_data_lo_lo_hi = {sectored_entries_0_7_data_lo_lo_hi_hi, newEntry_eff}; // @[TLB.scala:217:24, :449:24] wire [4:0] sectored_entries_0_7_data_lo_lo = {sectored_entries_0_7_data_lo_lo_hi, sectored_entries_0_7_data_lo_lo_lo}; // @[TLB.scala:217:24] wire [2:0] 
sectored_entries_0_7_data_lo_hi_lo = {sectored_entries_0_7_data_lo_hi_lo_hi, newEntry_ppp}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_7_data_lo_hi_hi = {sectored_entries_0_7_data_lo_hi_hi_hi, newEntry_pw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_7_data_lo_hi = {sectored_entries_0_7_data_lo_hi_hi, sectored_entries_0_7_data_lo_hi_lo}; // @[TLB.scala:217:24] wire [10:0] sectored_entries_0_7_data_lo = {sectored_entries_0_7_data_lo_hi, sectored_entries_0_7_data_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_7_data_hi_lo_lo = {sectored_entries_0_7_data_hi_lo_lo_hi, newEntry_hw}; // @[TLB.scala:217:24, :449:24] wire [2:0] sectored_entries_0_7_data_hi_lo_hi = {sectored_entries_0_7_data_hi_lo_hi_hi, newEntry_sw}; // @[TLB.scala:217:24, :449:24] wire [5:0] sectored_entries_0_7_data_hi_lo = {sectored_entries_0_7_data_hi_lo_hi, sectored_entries_0_7_data_hi_lo_lo}; // @[TLB.scala:217:24] wire [2:0] sectored_entries_0_7_data_hi_hi_lo = {sectored_entries_0_7_data_hi_hi_lo_hi, 1'h0}; // @[TLB.scala:217:24] wire [21:0] sectored_entries_0_7_data_hi_hi_hi = {sectored_entries_0_7_data_hi_hi_hi_hi, newEntry_g}; // @[TLB.scala:217:24, :449:24] wire [24:0] sectored_entries_0_7_data_hi_hi = {sectored_entries_0_7_data_hi_hi_hi, sectored_entries_0_7_data_hi_hi_lo}; // @[TLB.scala:217:24] wire [30:0] sectored_entries_0_7_data_hi = {sectored_entries_0_7_data_hi_hi, sectored_entries_0_7_data_hi_lo}; // @[TLB.scala:217:24] wire [41:0] _sectored_entries_0_7_data_T = {sectored_entries_0_7_data_hi, sectored_entries_0_7_data_lo}; // @[TLB.scala:217:24] wire [19:0] _entries_T_23; // @[TLB.scala:170:77] wire _entries_T_22; // @[TLB.scala:170:77] wire _entries_T_21; // @[TLB.scala:170:77] wire _entries_T_20; // @[TLB.scala:170:77] wire _entries_T_19; // @[TLB.scala:170:77] wire _entries_T_18; // @[TLB.scala:170:77] wire _entries_T_17; // @[TLB.scala:170:77] wire _entries_T_16; // @[TLB.scala:170:77] wire _entries_T_15; // @[TLB.scala:170:77] wire _entries_T_14; // @[TLB.scala:170:77] wire _entries_T_13; // @[TLB.scala:170:77] wire _entries_T_12; // @[TLB.scala:170:77] wire _entries_T_11; // @[TLB.scala:170:77] wire _entries_T_10; // @[TLB.scala:170:77] wire _entries_T_9; // @[TLB.scala:170:77] wire _entries_T_8; // @[TLB.scala:170:77] wire _entries_T_7; // @[TLB.scala:170:77] wire _entries_T_6; // @[TLB.scala:170:77] wire _entries_T_5; // @[TLB.scala:170:77] wire _entries_T_4; // @[TLB.scala:170:77] wire _entries_T_3; // @[TLB.scala:170:77] wire _entries_T_2; // @[TLB.scala:170:77] wire _entries_T_1; // @[TLB.scala:170:77] wire [3:0][41:0] _GEN_37 = {{sectored_entries_0_0_data_3}, {sectored_entries_0_0_data_2}, {sectored_entries_0_0_data_1}, {sectored_entries_0_0_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] _entries_WIRE_1 = _GEN_37[_entries_T]; // @[package.scala:163:13] assign _entries_T_1 = _entries_WIRE_1[0]; // @[TLB.scala:170:77] wire _entries_WIRE_fragmented_superpage = _entries_T_1; // @[TLB.scala:170:77] assign _entries_T_2 = _entries_WIRE_1[1]; // @[TLB.scala:170:77] wire _entries_WIRE_c = _entries_T_2; // @[TLB.scala:170:77] assign _entries_T_3 = _entries_WIRE_1[2]; // @[TLB.scala:170:77] wire _entries_WIRE_eff = _entries_T_3; // @[TLB.scala:170:77] assign _entries_T_4 = _entries_WIRE_1[3]; // @[TLB.scala:170:77] wire _entries_WIRE_paa = _entries_T_4; // @[TLB.scala:170:77] assign _entries_T_5 = _entries_WIRE_1[4]; // @[TLB.scala:170:77] wire _entries_WIRE_pal = _entries_T_5; // @[TLB.scala:170:77] assign _entries_T_6 = _entries_WIRE_1[5]; // 
@[TLB.scala:170:77] wire _entries_WIRE_ppp = _entries_T_6; // @[TLB.scala:170:77] assign _entries_T_7 = _entries_WIRE_1[6]; // @[TLB.scala:170:77] wire _entries_WIRE_pr = _entries_T_7; // @[TLB.scala:170:77] assign _entries_T_8 = _entries_WIRE_1[7]; // @[TLB.scala:170:77] wire _entries_WIRE_px = _entries_T_8; // @[TLB.scala:170:77] assign _entries_T_9 = _entries_WIRE_1[8]; // @[TLB.scala:170:77] wire _entries_WIRE_pw = _entries_T_9; // @[TLB.scala:170:77] assign _entries_T_10 = _entries_WIRE_1[9]; // @[TLB.scala:170:77] wire _entries_WIRE_hr = _entries_T_10; // @[TLB.scala:170:77] assign _entries_T_11 = _entries_WIRE_1[10]; // @[TLB.scala:170:77] wire _entries_WIRE_hx = _entries_T_11; // @[TLB.scala:170:77] assign _entries_T_12 = _entries_WIRE_1[11]; // @[TLB.scala:170:77] wire _entries_WIRE_hw = _entries_T_12; // @[TLB.scala:170:77] assign _entries_T_13 = _entries_WIRE_1[12]; // @[TLB.scala:170:77] wire _entries_WIRE_sr = _entries_T_13; // @[TLB.scala:170:77] assign _entries_T_14 = _entries_WIRE_1[13]; // @[TLB.scala:170:77] wire _entries_WIRE_sx = _entries_T_14; // @[TLB.scala:170:77] assign _entries_T_15 = _entries_WIRE_1[14]; // @[TLB.scala:170:77] wire _entries_WIRE_sw = _entries_T_15; // @[TLB.scala:170:77] assign _entries_T_16 = _entries_WIRE_1[15]; // @[TLB.scala:170:77] wire _entries_WIRE_gf = _entries_T_16; // @[TLB.scala:170:77] assign _entries_T_17 = _entries_WIRE_1[16]; // @[TLB.scala:170:77] wire _entries_WIRE_pf = _entries_T_17; // @[TLB.scala:170:77] assign _entries_T_18 = _entries_WIRE_1[17]; // @[TLB.scala:170:77] wire _entries_WIRE_ae_stage2 = _entries_T_18; // @[TLB.scala:170:77] assign _entries_T_19 = _entries_WIRE_1[18]; // @[TLB.scala:170:77] wire _entries_WIRE_ae_final = _entries_T_19; // @[TLB.scala:170:77] assign _entries_T_20 = _entries_WIRE_1[19]; // @[TLB.scala:170:77] wire _entries_WIRE_ae_ptw = _entries_T_20; // @[TLB.scala:170:77] assign _entries_T_21 = _entries_WIRE_1[20]; // @[TLB.scala:170:77] wire _entries_WIRE_g = _entries_T_21; // @[TLB.scala:170:77] assign _entries_T_22 = _entries_WIRE_1[21]; // @[TLB.scala:170:77] wire _entries_WIRE_u = _entries_T_22; // @[TLB.scala:170:77] assign _entries_T_23 = _entries_WIRE_1[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_ppn = _entries_T_23; // @[TLB.scala:170:77] wire [19:0] _entries_T_47; // @[TLB.scala:170:77] wire _entries_T_46; // @[TLB.scala:170:77] wire _entries_T_45; // @[TLB.scala:170:77] wire _entries_T_44; // @[TLB.scala:170:77] wire _entries_T_43; // @[TLB.scala:170:77] wire _entries_T_42; // @[TLB.scala:170:77] wire _entries_T_41; // @[TLB.scala:170:77] wire _entries_T_40; // @[TLB.scala:170:77] wire _entries_T_39; // @[TLB.scala:170:77] wire _entries_T_38; // @[TLB.scala:170:77] wire _entries_T_37; // @[TLB.scala:170:77] wire _entries_T_36; // @[TLB.scala:170:77] wire _entries_T_35; // @[TLB.scala:170:77] wire _entries_T_34; // @[TLB.scala:170:77] wire _entries_T_33; // @[TLB.scala:170:77] wire _entries_T_32; // @[TLB.scala:170:77] wire _entries_T_31; // @[TLB.scala:170:77] wire _entries_T_30; // @[TLB.scala:170:77] wire _entries_T_29; // @[TLB.scala:170:77] wire _entries_T_28; // @[TLB.scala:170:77] wire _entries_T_27; // @[TLB.scala:170:77] wire _entries_T_26; // @[TLB.scala:170:77] wire _entries_T_25; // @[TLB.scala:170:77] wire [3:0][41:0] _GEN_38 = {{sectored_entries_0_1_data_3}, {sectored_entries_0_1_data_2}, {sectored_entries_0_1_data_1}, {sectored_entries_0_1_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] _entries_WIRE_3 = _GEN_38[_entries_T_24]; // @[package.scala:163:13] 
assign _entries_T_25 = _entries_WIRE_3[0]; // @[TLB.scala:170:77] wire _entries_WIRE_2_fragmented_superpage = _entries_T_25; // @[TLB.scala:170:77] assign _entries_T_26 = _entries_WIRE_3[1]; // @[TLB.scala:170:77] wire _entries_WIRE_2_c = _entries_T_26; // @[TLB.scala:170:77] assign _entries_T_27 = _entries_WIRE_3[2]; // @[TLB.scala:170:77] wire _entries_WIRE_2_eff = _entries_T_27; // @[TLB.scala:170:77] assign _entries_T_28 = _entries_WIRE_3[3]; // @[TLB.scala:170:77] wire _entries_WIRE_2_paa = _entries_T_28; // @[TLB.scala:170:77] assign _entries_T_29 = _entries_WIRE_3[4]; // @[TLB.scala:170:77] wire _entries_WIRE_2_pal = _entries_T_29; // @[TLB.scala:170:77] assign _entries_T_30 = _entries_WIRE_3[5]; // @[TLB.scala:170:77] wire _entries_WIRE_2_ppp = _entries_T_30; // @[TLB.scala:170:77] assign _entries_T_31 = _entries_WIRE_3[6]; // @[TLB.scala:170:77] wire _entries_WIRE_2_pr = _entries_T_31; // @[TLB.scala:170:77] assign _entries_T_32 = _entries_WIRE_3[7]; // @[TLB.scala:170:77] wire _entries_WIRE_2_px = _entries_T_32; // @[TLB.scala:170:77] assign _entries_T_33 = _entries_WIRE_3[8]; // @[TLB.scala:170:77] wire _entries_WIRE_2_pw = _entries_T_33; // @[TLB.scala:170:77] assign _entries_T_34 = _entries_WIRE_3[9]; // @[TLB.scala:170:77] wire _entries_WIRE_2_hr = _entries_T_34; // @[TLB.scala:170:77] assign _entries_T_35 = _entries_WIRE_3[10]; // @[TLB.scala:170:77] wire _entries_WIRE_2_hx = _entries_T_35; // @[TLB.scala:170:77] assign _entries_T_36 = _entries_WIRE_3[11]; // @[TLB.scala:170:77] wire _entries_WIRE_2_hw = _entries_T_36; // @[TLB.scala:170:77] assign _entries_T_37 = _entries_WIRE_3[12]; // @[TLB.scala:170:77] wire _entries_WIRE_2_sr = _entries_T_37; // @[TLB.scala:170:77] assign _entries_T_38 = _entries_WIRE_3[13]; // @[TLB.scala:170:77] wire _entries_WIRE_2_sx = _entries_T_38; // @[TLB.scala:170:77] assign _entries_T_39 = _entries_WIRE_3[14]; // @[TLB.scala:170:77] wire _entries_WIRE_2_sw = _entries_T_39; // @[TLB.scala:170:77] assign _entries_T_40 = _entries_WIRE_3[15]; // @[TLB.scala:170:77] wire _entries_WIRE_2_gf = _entries_T_40; // @[TLB.scala:170:77] assign _entries_T_41 = _entries_WIRE_3[16]; // @[TLB.scala:170:77] wire _entries_WIRE_2_pf = _entries_T_41; // @[TLB.scala:170:77] assign _entries_T_42 = _entries_WIRE_3[17]; // @[TLB.scala:170:77] wire _entries_WIRE_2_ae_stage2 = _entries_T_42; // @[TLB.scala:170:77] assign _entries_T_43 = _entries_WIRE_3[18]; // @[TLB.scala:170:77] wire _entries_WIRE_2_ae_final = _entries_T_43; // @[TLB.scala:170:77] assign _entries_T_44 = _entries_WIRE_3[19]; // @[TLB.scala:170:77] wire _entries_WIRE_2_ae_ptw = _entries_T_44; // @[TLB.scala:170:77] assign _entries_T_45 = _entries_WIRE_3[20]; // @[TLB.scala:170:77] wire _entries_WIRE_2_g = _entries_T_45; // @[TLB.scala:170:77] assign _entries_T_46 = _entries_WIRE_3[21]; // @[TLB.scala:170:77] wire _entries_WIRE_2_u = _entries_T_46; // @[TLB.scala:170:77] assign _entries_T_47 = _entries_WIRE_3[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_2_ppn = _entries_T_47; // @[TLB.scala:170:77] wire [19:0] _entries_T_71; // @[TLB.scala:170:77] wire _entries_T_70; // @[TLB.scala:170:77] wire _entries_T_69; // @[TLB.scala:170:77] wire _entries_T_68; // @[TLB.scala:170:77] wire _entries_T_67; // @[TLB.scala:170:77] wire _entries_T_66; // @[TLB.scala:170:77] wire _entries_T_65; // @[TLB.scala:170:77] wire _entries_T_64; // @[TLB.scala:170:77] wire _entries_T_63; // @[TLB.scala:170:77] wire _entries_T_62; // @[TLB.scala:170:77] wire _entries_T_61; // @[TLB.scala:170:77] wire _entries_T_60; // 
@[TLB.scala:170:77] wire _entries_T_59; // @[TLB.scala:170:77] wire _entries_T_58; // @[TLB.scala:170:77] wire _entries_T_57; // @[TLB.scala:170:77] wire _entries_T_56; // @[TLB.scala:170:77] wire _entries_T_55; // @[TLB.scala:170:77] wire _entries_T_54; // @[TLB.scala:170:77] wire _entries_T_53; // @[TLB.scala:170:77] wire _entries_T_52; // @[TLB.scala:170:77] wire _entries_T_51; // @[TLB.scala:170:77] wire _entries_T_50; // @[TLB.scala:170:77] wire _entries_T_49; // @[TLB.scala:170:77] wire [3:0][41:0] _GEN_39 = {{sectored_entries_0_2_data_3}, {sectored_entries_0_2_data_2}, {sectored_entries_0_2_data_1}, {sectored_entries_0_2_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] _entries_WIRE_5 = _GEN_39[_entries_T_48]; // @[package.scala:163:13] assign _entries_T_49 = _entries_WIRE_5[0]; // @[TLB.scala:170:77] wire _entries_WIRE_4_fragmented_superpage = _entries_T_49; // @[TLB.scala:170:77] assign _entries_T_50 = _entries_WIRE_5[1]; // @[TLB.scala:170:77] wire _entries_WIRE_4_c = _entries_T_50; // @[TLB.scala:170:77] assign _entries_T_51 = _entries_WIRE_5[2]; // @[TLB.scala:170:77] wire _entries_WIRE_4_eff = _entries_T_51; // @[TLB.scala:170:77] assign _entries_T_52 = _entries_WIRE_5[3]; // @[TLB.scala:170:77] wire _entries_WIRE_4_paa = _entries_T_52; // @[TLB.scala:170:77] assign _entries_T_53 = _entries_WIRE_5[4]; // @[TLB.scala:170:77] wire _entries_WIRE_4_pal = _entries_T_53; // @[TLB.scala:170:77] assign _entries_T_54 = _entries_WIRE_5[5]; // @[TLB.scala:170:77] wire _entries_WIRE_4_ppp = _entries_T_54; // @[TLB.scala:170:77] assign _entries_T_55 = _entries_WIRE_5[6]; // @[TLB.scala:170:77] wire _entries_WIRE_4_pr = _entries_T_55; // @[TLB.scala:170:77] assign _entries_T_56 = _entries_WIRE_5[7]; // @[TLB.scala:170:77] wire _entries_WIRE_4_px = _entries_T_56; // @[TLB.scala:170:77] assign _entries_T_57 = _entries_WIRE_5[8]; // @[TLB.scala:170:77] wire _entries_WIRE_4_pw = _entries_T_57; // @[TLB.scala:170:77] assign _entries_T_58 = _entries_WIRE_5[9]; // @[TLB.scala:170:77] wire _entries_WIRE_4_hr = _entries_T_58; // @[TLB.scala:170:77] assign _entries_T_59 = _entries_WIRE_5[10]; // @[TLB.scala:170:77] wire _entries_WIRE_4_hx = _entries_T_59; // @[TLB.scala:170:77] assign _entries_T_60 = _entries_WIRE_5[11]; // @[TLB.scala:170:77] wire _entries_WIRE_4_hw = _entries_T_60; // @[TLB.scala:170:77] assign _entries_T_61 = _entries_WIRE_5[12]; // @[TLB.scala:170:77] wire _entries_WIRE_4_sr = _entries_T_61; // @[TLB.scala:170:77] assign _entries_T_62 = _entries_WIRE_5[13]; // @[TLB.scala:170:77] wire _entries_WIRE_4_sx = _entries_T_62; // @[TLB.scala:170:77] assign _entries_T_63 = _entries_WIRE_5[14]; // @[TLB.scala:170:77] wire _entries_WIRE_4_sw = _entries_T_63; // @[TLB.scala:170:77] assign _entries_T_64 = _entries_WIRE_5[15]; // @[TLB.scala:170:77] wire _entries_WIRE_4_gf = _entries_T_64; // @[TLB.scala:170:77] assign _entries_T_65 = _entries_WIRE_5[16]; // @[TLB.scala:170:77] wire _entries_WIRE_4_pf = _entries_T_65; // @[TLB.scala:170:77] assign _entries_T_66 = _entries_WIRE_5[17]; // @[TLB.scala:170:77] wire _entries_WIRE_4_ae_stage2 = _entries_T_66; // @[TLB.scala:170:77] assign _entries_T_67 = _entries_WIRE_5[18]; // @[TLB.scala:170:77] wire _entries_WIRE_4_ae_final = _entries_T_67; // @[TLB.scala:170:77] assign _entries_T_68 = _entries_WIRE_5[19]; // @[TLB.scala:170:77] wire _entries_WIRE_4_ae_ptw = _entries_T_68; // @[TLB.scala:170:77] assign _entries_T_69 = _entries_WIRE_5[20]; // @[TLB.scala:170:77] wire _entries_WIRE_4_g = _entries_T_69; // @[TLB.scala:170:77] assign 
_entries_T_70 = _entries_WIRE_5[21]; // @[TLB.scala:170:77] wire _entries_WIRE_4_u = _entries_T_70; // @[TLB.scala:170:77] assign _entries_T_71 = _entries_WIRE_5[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_4_ppn = _entries_T_71; // @[TLB.scala:170:77] wire [19:0] _entries_T_95; // @[TLB.scala:170:77] wire _entries_T_94; // @[TLB.scala:170:77] wire _entries_T_93; // @[TLB.scala:170:77] wire _entries_T_92; // @[TLB.scala:170:77] wire _entries_T_91; // @[TLB.scala:170:77] wire _entries_T_90; // @[TLB.scala:170:77] wire _entries_T_89; // @[TLB.scala:170:77] wire _entries_T_88; // @[TLB.scala:170:77] wire _entries_T_87; // @[TLB.scala:170:77] wire _entries_T_86; // @[TLB.scala:170:77] wire _entries_T_85; // @[TLB.scala:170:77] wire _entries_T_84; // @[TLB.scala:170:77] wire _entries_T_83; // @[TLB.scala:170:77] wire _entries_T_82; // @[TLB.scala:170:77] wire _entries_T_81; // @[TLB.scala:170:77] wire _entries_T_80; // @[TLB.scala:170:77] wire _entries_T_79; // @[TLB.scala:170:77] wire _entries_T_78; // @[TLB.scala:170:77] wire _entries_T_77; // @[TLB.scala:170:77] wire _entries_T_76; // @[TLB.scala:170:77] wire _entries_T_75; // @[TLB.scala:170:77] wire _entries_T_74; // @[TLB.scala:170:77] wire _entries_T_73; // @[TLB.scala:170:77] wire [3:0][41:0] _GEN_40 = {{sectored_entries_0_3_data_3}, {sectored_entries_0_3_data_2}, {sectored_entries_0_3_data_1}, {sectored_entries_0_3_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] _entries_WIRE_7 = _GEN_40[_entries_T_72]; // @[package.scala:163:13] assign _entries_T_73 = _entries_WIRE_7[0]; // @[TLB.scala:170:77] wire _entries_WIRE_6_fragmented_superpage = _entries_T_73; // @[TLB.scala:170:77] assign _entries_T_74 = _entries_WIRE_7[1]; // @[TLB.scala:170:77] wire _entries_WIRE_6_c = _entries_T_74; // @[TLB.scala:170:77] assign _entries_T_75 = _entries_WIRE_7[2]; // @[TLB.scala:170:77] wire _entries_WIRE_6_eff = _entries_T_75; // @[TLB.scala:170:77] assign _entries_T_76 = _entries_WIRE_7[3]; // @[TLB.scala:170:77] wire _entries_WIRE_6_paa = _entries_T_76; // @[TLB.scala:170:77] assign _entries_T_77 = _entries_WIRE_7[4]; // @[TLB.scala:170:77] wire _entries_WIRE_6_pal = _entries_T_77; // @[TLB.scala:170:77] assign _entries_T_78 = _entries_WIRE_7[5]; // @[TLB.scala:170:77] wire _entries_WIRE_6_ppp = _entries_T_78; // @[TLB.scala:170:77] assign _entries_T_79 = _entries_WIRE_7[6]; // @[TLB.scala:170:77] wire _entries_WIRE_6_pr = _entries_T_79; // @[TLB.scala:170:77] assign _entries_T_80 = _entries_WIRE_7[7]; // @[TLB.scala:170:77] wire _entries_WIRE_6_px = _entries_T_80; // @[TLB.scala:170:77] assign _entries_T_81 = _entries_WIRE_7[8]; // @[TLB.scala:170:77] wire _entries_WIRE_6_pw = _entries_T_81; // @[TLB.scala:170:77] assign _entries_T_82 = _entries_WIRE_7[9]; // @[TLB.scala:170:77] wire _entries_WIRE_6_hr = _entries_T_82; // @[TLB.scala:170:77] assign _entries_T_83 = _entries_WIRE_7[10]; // @[TLB.scala:170:77] wire _entries_WIRE_6_hx = _entries_T_83; // @[TLB.scala:170:77] assign _entries_T_84 = _entries_WIRE_7[11]; // @[TLB.scala:170:77] wire _entries_WIRE_6_hw = _entries_T_84; // @[TLB.scala:170:77] assign _entries_T_85 = _entries_WIRE_7[12]; // @[TLB.scala:170:77] wire _entries_WIRE_6_sr = _entries_T_85; // @[TLB.scala:170:77] assign _entries_T_86 = _entries_WIRE_7[13]; // @[TLB.scala:170:77] wire _entries_WIRE_6_sx = _entries_T_86; // @[TLB.scala:170:77] assign _entries_T_87 = _entries_WIRE_7[14]; // @[TLB.scala:170:77] wire _entries_WIRE_6_sw = _entries_T_87; // @[TLB.scala:170:77] assign _entries_T_88 = _entries_WIRE_7[15]; // 
@[TLB.scala:170:77] wire _entries_WIRE_6_gf = _entries_T_88; // @[TLB.scala:170:77] assign _entries_T_89 = _entries_WIRE_7[16]; // @[TLB.scala:170:77] wire _entries_WIRE_6_pf = _entries_T_89; // @[TLB.scala:170:77] assign _entries_T_90 = _entries_WIRE_7[17]; // @[TLB.scala:170:77] wire _entries_WIRE_6_ae_stage2 = _entries_T_90; // @[TLB.scala:170:77] assign _entries_T_91 = _entries_WIRE_7[18]; // @[TLB.scala:170:77] wire _entries_WIRE_6_ae_final = _entries_T_91; // @[TLB.scala:170:77] assign _entries_T_92 = _entries_WIRE_7[19]; // @[TLB.scala:170:77] wire _entries_WIRE_6_ae_ptw = _entries_T_92; // @[TLB.scala:170:77] assign _entries_T_93 = _entries_WIRE_7[20]; // @[TLB.scala:170:77] wire _entries_WIRE_6_g = _entries_T_93; // @[TLB.scala:170:77] assign _entries_T_94 = _entries_WIRE_7[21]; // @[TLB.scala:170:77] wire _entries_WIRE_6_u = _entries_T_94; // @[TLB.scala:170:77] assign _entries_T_95 = _entries_WIRE_7[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_6_ppn = _entries_T_95; // @[TLB.scala:170:77] wire [19:0] _entries_T_119; // @[TLB.scala:170:77] wire _entries_T_118; // @[TLB.scala:170:77] wire _entries_T_117; // @[TLB.scala:170:77] wire _entries_T_116; // @[TLB.scala:170:77] wire _entries_T_115; // @[TLB.scala:170:77] wire _entries_T_114; // @[TLB.scala:170:77] wire _entries_T_113; // @[TLB.scala:170:77] wire _entries_T_112; // @[TLB.scala:170:77] wire _entries_T_111; // @[TLB.scala:170:77] wire _entries_T_110; // @[TLB.scala:170:77] wire _entries_T_109; // @[TLB.scala:170:77] wire _entries_T_108; // @[TLB.scala:170:77] wire _entries_T_107; // @[TLB.scala:170:77] wire _entries_T_106; // @[TLB.scala:170:77] wire _entries_T_105; // @[TLB.scala:170:77] wire _entries_T_104; // @[TLB.scala:170:77] wire _entries_T_103; // @[TLB.scala:170:77] wire _entries_T_102; // @[TLB.scala:170:77] wire _entries_T_101; // @[TLB.scala:170:77] wire _entries_T_100; // @[TLB.scala:170:77] wire _entries_T_99; // @[TLB.scala:170:77] wire _entries_T_98; // @[TLB.scala:170:77] wire _entries_T_97; // @[TLB.scala:170:77] wire [3:0][41:0] _GEN_41 = {{sectored_entries_0_4_data_3}, {sectored_entries_0_4_data_2}, {sectored_entries_0_4_data_1}, {sectored_entries_0_4_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] _entries_WIRE_9 = _GEN_41[_entries_T_96]; // @[package.scala:163:13] assign _entries_T_97 = _entries_WIRE_9[0]; // @[TLB.scala:170:77] wire _entries_WIRE_8_fragmented_superpage = _entries_T_97; // @[TLB.scala:170:77] assign _entries_T_98 = _entries_WIRE_9[1]; // @[TLB.scala:170:77] wire _entries_WIRE_8_c = _entries_T_98; // @[TLB.scala:170:77] assign _entries_T_99 = _entries_WIRE_9[2]; // @[TLB.scala:170:77] wire _entries_WIRE_8_eff = _entries_T_99; // @[TLB.scala:170:77] assign _entries_T_100 = _entries_WIRE_9[3]; // @[TLB.scala:170:77] wire _entries_WIRE_8_paa = _entries_T_100; // @[TLB.scala:170:77] assign _entries_T_101 = _entries_WIRE_9[4]; // @[TLB.scala:170:77] wire _entries_WIRE_8_pal = _entries_T_101; // @[TLB.scala:170:77] assign _entries_T_102 = _entries_WIRE_9[5]; // @[TLB.scala:170:77] wire _entries_WIRE_8_ppp = _entries_T_102; // @[TLB.scala:170:77] assign _entries_T_103 = _entries_WIRE_9[6]; // @[TLB.scala:170:77] wire _entries_WIRE_8_pr = _entries_T_103; // @[TLB.scala:170:77] assign _entries_T_104 = _entries_WIRE_9[7]; // @[TLB.scala:170:77] wire _entries_WIRE_8_px = _entries_T_104; // @[TLB.scala:170:77] assign _entries_T_105 = _entries_WIRE_9[8]; // @[TLB.scala:170:77] wire _entries_WIRE_8_pw = _entries_T_105; // @[TLB.scala:170:77] assign _entries_T_106 = 
_entries_WIRE_9[9]; // @[TLB.scala:170:77] wire _entries_WIRE_8_hr = _entries_T_106; // @[TLB.scala:170:77] assign _entries_T_107 = _entries_WIRE_9[10]; // @[TLB.scala:170:77] wire _entries_WIRE_8_hx = _entries_T_107; // @[TLB.scala:170:77] assign _entries_T_108 = _entries_WIRE_9[11]; // @[TLB.scala:170:77] wire _entries_WIRE_8_hw = _entries_T_108; // @[TLB.scala:170:77] assign _entries_T_109 = _entries_WIRE_9[12]; // @[TLB.scala:170:77] wire _entries_WIRE_8_sr = _entries_T_109; // @[TLB.scala:170:77] assign _entries_T_110 = _entries_WIRE_9[13]; // @[TLB.scala:170:77] wire _entries_WIRE_8_sx = _entries_T_110; // @[TLB.scala:170:77] assign _entries_T_111 = _entries_WIRE_9[14]; // @[TLB.scala:170:77] wire _entries_WIRE_8_sw = _entries_T_111; // @[TLB.scala:170:77] assign _entries_T_112 = _entries_WIRE_9[15]; // @[TLB.scala:170:77] wire _entries_WIRE_8_gf = _entries_T_112; // @[TLB.scala:170:77] assign _entries_T_113 = _entries_WIRE_9[16]; // @[TLB.scala:170:77] wire _entries_WIRE_8_pf = _entries_T_113; // @[TLB.scala:170:77] assign _entries_T_114 = _entries_WIRE_9[17]; // @[TLB.scala:170:77] wire _entries_WIRE_8_ae_stage2 = _entries_T_114; // @[TLB.scala:170:77] assign _entries_T_115 = _entries_WIRE_9[18]; // @[TLB.scala:170:77] wire _entries_WIRE_8_ae_final = _entries_T_115; // @[TLB.scala:170:77] assign _entries_T_116 = _entries_WIRE_9[19]; // @[TLB.scala:170:77] wire _entries_WIRE_8_ae_ptw = _entries_T_116; // @[TLB.scala:170:77] assign _entries_T_117 = _entries_WIRE_9[20]; // @[TLB.scala:170:77] wire _entries_WIRE_8_g = _entries_T_117; // @[TLB.scala:170:77] assign _entries_T_118 = _entries_WIRE_9[21]; // @[TLB.scala:170:77] wire _entries_WIRE_8_u = _entries_T_118; // @[TLB.scala:170:77] assign _entries_T_119 = _entries_WIRE_9[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_8_ppn = _entries_T_119; // @[TLB.scala:170:77] wire [19:0] _entries_T_143; // @[TLB.scala:170:77] wire _entries_T_142; // @[TLB.scala:170:77] wire _entries_T_141; // @[TLB.scala:170:77] wire _entries_T_140; // @[TLB.scala:170:77] wire _entries_T_139; // @[TLB.scala:170:77] wire _entries_T_138; // @[TLB.scala:170:77] wire _entries_T_137; // @[TLB.scala:170:77] wire _entries_T_136; // @[TLB.scala:170:77] wire _entries_T_135; // @[TLB.scala:170:77] wire _entries_T_134; // @[TLB.scala:170:77] wire _entries_T_133; // @[TLB.scala:170:77] wire _entries_T_132; // @[TLB.scala:170:77] wire _entries_T_131; // @[TLB.scala:170:77] wire _entries_T_130; // @[TLB.scala:170:77] wire _entries_T_129; // @[TLB.scala:170:77] wire _entries_T_128; // @[TLB.scala:170:77] wire _entries_T_127; // @[TLB.scala:170:77] wire _entries_T_126; // @[TLB.scala:170:77] wire _entries_T_125; // @[TLB.scala:170:77] wire _entries_T_124; // @[TLB.scala:170:77] wire _entries_T_123; // @[TLB.scala:170:77] wire _entries_T_122; // @[TLB.scala:170:77] wire _entries_T_121; // @[TLB.scala:170:77] wire [3:0][41:0] _GEN_42 = {{sectored_entries_0_5_data_3}, {sectored_entries_0_5_data_2}, {sectored_entries_0_5_data_1}, {sectored_entries_0_5_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] _entries_WIRE_11 = _GEN_42[_entries_T_120]; // @[package.scala:163:13] assign _entries_T_121 = _entries_WIRE_11[0]; // @[TLB.scala:170:77] wire _entries_WIRE_10_fragmented_superpage = _entries_T_121; // @[TLB.scala:170:77] assign _entries_T_122 = _entries_WIRE_11[1]; // @[TLB.scala:170:77] wire _entries_WIRE_10_c = _entries_T_122; // @[TLB.scala:170:77] assign _entries_T_123 = _entries_WIRE_11[2]; // @[TLB.scala:170:77] wire _entries_WIRE_10_eff = _entries_T_123; // 
@[TLB.scala:170:77] assign _entries_T_124 = _entries_WIRE_11[3]; // @[TLB.scala:170:77] wire _entries_WIRE_10_paa = _entries_T_124; // @[TLB.scala:170:77] assign _entries_T_125 = _entries_WIRE_11[4]; // @[TLB.scala:170:77] wire _entries_WIRE_10_pal = _entries_T_125; // @[TLB.scala:170:77] assign _entries_T_126 = _entries_WIRE_11[5]; // @[TLB.scala:170:77] wire _entries_WIRE_10_ppp = _entries_T_126; // @[TLB.scala:170:77] assign _entries_T_127 = _entries_WIRE_11[6]; // @[TLB.scala:170:77] wire _entries_WIRE_10_pr = _entries_T_127; // @[TLB.scala:170:77] assign _entries_T_128 = _entries_WIRE_11[7]; // @[TLB.scala:170:77] wire _entries_WIRE_10_px = _entries_T_128; // @[TLB.scala:170:77] assign _entries_T_129 = _entries_WIRE_11[8]; // @[TLB.scala:170:77] wire _entries_WIRE_10_pw = _entries_T_129; // @[TLB.scala:170:77] assign _entries_T_130 = _entries_WIRE_11[9]; // @[TLB.scala:170:77] wire _entries_WIRE_10_hr = _entries_T_130; // @[TLB.scala:170:77] assign _entries_T_131 = _entries_WIRE_11[10]; // @[TLB.scala:170:77] wire _entries_WIRE_10_hx = _entries_T_131; // @[TLB.scala:170:77] assign _entries_T_132 = _entries_WIRE_11[11]; // @[TLB.scala:170:77] wire _entries_WIRE_10_hw = _entries_T_132; // @[TLB.scala:170:77] assign _entries_T_133 = _entries_WIRE_11[12]; // @[TLB.scala:170:77] wire _entries_WIRE_10_sr = _entries_T_133; // @[TLB.scala:170:77] assign _entries_T_134 = _entries_WIRE_11[13]; // @[TLB.scala:170:77] wire _entries_WIRE_10_sx = _entries_T_134; // @[TLB.scala:170:77] assign _entries_T_135 = _entries_WIRE_11[14]; // @[TLB.scala:170:77] wire _entries_WIRE_10_sw = _entries_T_135; // @[TLB.scala:170:77] assign _entries_T_136 = _entries_WIRE_11[15]; // @[TLB.scala:170:77] wire _entries_WIRE_10_gf = _entries_T_136; // @[TLB.scala:170:77] assign _entries_T_137 = _entries_WIRE_11[16]; // @[TLB.scala:170:77] wire _entries_WIRE_10_pf = _entries_T_137; // @[TLB.scala:170:77] assign _entries_T_138 = _entries_WIRE_11[17]; // @[TLB.scala:170:77] wire _entries_WIRE_10_ae_stage2 = _entries_T_138; // @[TLB.scala:170:77] assign _entries_T_139 = _entries_WIRE_11[18]; // @[TLB.scala:170:77] wire _entries_WIRE_10_ae_final = _entries_T_139; // @[TLB.scala:170:77] assign _entries_T_140 = _entries_WIRE_11[19]; // @[TLB.scala:170:77] wire _entries_WIRE_10_ae_ptw = _entries_T_140; // @[TLB.scala:170:77] assign _entries_T_141 = _entries_WIRE_11[20]; // @[TLB.scala:170:77] wire _entries_WIRE_10_g = _entries_T_141; // @[TLB.scala:170:77] assign _entries_T_142 = _entries_WIRE_11[21]; // @[TLB.scala:170:77] wire _entries_WIRE_10_u = _entries_T_142; // @[TLB.scala:170:77] assign _entries_T_143 = _entries_WIRE_11[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_10_ppn = _entries_T_143; // @[TLB.scala:170:77] wire [19:0] _entries_T_167; // @[TLB.scala:170:77] wire _entries_T_166; // @[TLB.scala:170:77] wire _entries_T_165; // @[TLB.scala:170:77] wire _entries_T_164; // @[TLB.scala:170:77] wire _entries_T_163; // @[TLB.scala:170:77] wire _entries_T_162; // @[TLB.scala:170:77] wire _entries_T_161; // @[TLB.scala:170:77] wire _entries_T_160; // @[TLB.scala:170:77] wire _entries_T_159; // @[TLB.scala:170:77] wire _entries_T_158; // @[TLB.scala:170:77] wire _entries_T_157; // @[TLB.scala:170:77] wire _entries_T_156; // @[TLB.scala:170:77] wire _entries_T_155; // @[TLB.scala:170:77] wire _entries_T_154; // @[TLB.scala:170:77] wire _entries_T_153; // @[TLB.scala:170:77] wire _entries_T_152; // @[TLB.scala:170:77] wire _entries_T_151; // @[TLB.scala:170:77] wire _entries_T_150; // @[TLB.scala:170:77] wire 
_entries_T_149; // @[TLB.scala:170:77] wire _entries_T_148; // @[TLB.scala:170:77] wire _entries_T_147; // @[TLB.scala:170:77] wire _entries_T_146; // @[TLB.scala:170:77] wire _entries_T_145; // @[TLB.scala:170:77] wire [3:0][41:0] _GEN_43 = {{sectored_entries_0_6_data_3}, {sectored_entries_0_6_data_2}, {sectored_entries_0_6_data_1}, {sectored_entries_0_6_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] _entries_WIRE_13 = _GEN_43[_entries_T_144]; // @[package.scala:163:13] assign _entries_T_145 = _entries_WIRE_13[0]; // @[TLB.scala:170:77] wire _entries_WIRE_12_fragmented_superpage = _entries_T_145; // @[TLB.scala:170:77] assign _entries_T_146 = _entries_WIRE_13[1]; // @[TLB.scala:170:77] wire _entries_WIRE_12_c = _entries_T_146; // @[TLB.scala:170:77] assign _entries_T_147 = _entries_WIRE_13[2]; // @[TLB.scala:170:77] wire _entries_WIRE_12_eff = _entries_T_147; // @[TLB.scala:170:77] assign _entries_T_148 = _entries_WIRE_13[3]; // @[TLB.scala:170:77] wire _entries_WIRE_12_paa = _entries_T_148; // @[TLB.scala:170:77] assign _entries_T_149 = _entries_WIRE_13[4]; // @[TLB.scala:170:77] wire _entries_WIRE_12_pal = _entries_T_149; // @[TLB.scala:170:77] assign _entries_T_150 = _entries_WIRE_13[5]; // @[TLB.scala:170:77] wire _entries_WIRE_12_ppp = _entries_T_150; // @[TLB.scala:170:77] assign _entries_T_151 = _entries_WIRE_13[6]; // @[TLB.scala:170:77] wire _entries_WIRE_12_pr = _entries_T_151; // @[TLB.scala:170:77] assign _entries_T_152 = _entries_WIRE_13[7]; // @[TLB.scala:170:77] wire _entries_WIRE_12_px = _entries_T_152; // @[TLB.scala:170:77] assign _entries_T_153 = _entries_WIRE_13[8]; // @[TLB.scala:170:77] wire _entries_WIRE_12_pw = _entries_T_153; // @[TLB.scala:170:77] assign _entries_T_154 = _entries_WIRE_13[9]; // @[TLB.scala:170:77] wire _entries_WIRE_12_hr = _entries_T_154; // @[TLB.scala:170:77] assign _entries_T_155 = _entries_WIRE_13[10]; // @[TLB.scala:170:77] wire _entries_WIRE_12_hx = _entries_T_155; // @[TLB.scala:170:77] assign _entries_T_156 = _entries_WIRE_13[11]; // @[TLB.scala:170:77] wire _entries_WIRE_12_hw = _entries_T_156; // @[TLB.scala:170:77] assign _entries_T_157 = _entries_WIRE_13[12]; // @[TLB.scala:170:77] wire _entries_WIRE_12_sr = _entries_T_157; // @[TLB.scala:170:77] assign _entries_T_158 = _entries_WIRE_13[13]; // @[TLB.scala:170:77] wire _entries_WIRE_12_sx = _entries_T_158; // @[TLB.scala:170:77] assign _entries_T_159 = _entries_WIRE_13[14]; // @[TLB.scala:170:77] wire _entries_WIRE_12_sw = _entries_T_159; // @[TLB.scala:170:77] assign _entries_T_160 = _entries_WIRE_13[15]; // @[TLB.scala:170:77] wire _entries_WIRE_12_gf = _entries_T_160; // @[TLB.scala:170:77] assign _entries_T_161 = _entries_WIRE_13[16]; // @[TLB.scala:170:77] wire _entries_WIRE_12_pf = _entries_T_161; // @[TLB.scala:170:77] assign _entries_T_162 = _entries_WIRE_13[17]; // @[TLB.scala:170:77] wire _entries_WIRE_12_ae_stage2 = _entries_T_162; // @[TLB.scala:170:77] assign _entries_T_163 = _entries_WIRE_13[18]; // @[TLB.scala:170:77] wire _entries_WIRE_12_ae_final = _entries_T_163; // @[TLB.scala:170:77] assign _entries_T_164 = _entries_WIRE_13[19]; // @[TLB.scala:170:77] wire _entries_WIRE_12_ae_ptw = _entries_T_164; // @[TLB.scala:170:77] assign _entries_T_165 = _entries_WIRE_13[20]; // @[TLB.scala:170:77] wire _entries_WIRE_12_g = _entries_T_165; // @[TLB.scala:170:77] assign _entries_T_166 = _entries_WIRE_13[21]; // @[TLB.scala:170:77] wire _entries_WIRE_12_u = _entries_T_166; // @[TLB.scala:170:77] assign _entries_T_167 = _entries_WIRE_13[41:22]; // @[TLB.scala:170:77] 
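// Note: the per-entry field extractions above and below all decode the same 42-bit packed TLBEntryData layout (TLB.scala:170), as the bit selects show: bit 0 fragmented_superpage, 1 c, 2 eff, 3 paa, 4 pal, 5 ppp, 6 pr, 7 px, 8 pw, 9 hr, 10 hx, 11 hw, 12 sr, 13 sx, 14 sw, 15 gf, 16 pf, 17 ae_stage2, 18 ae_final, 19 ae_ptw, 20 g, 21 u, and bits 41:22 ppn.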
wire [19:0] _entries_WIRE_12_ppn = _entries_T_167; // @[TLB.scala:170:77] wire [19:0] _entries_T_191; // @[TLB.scala:170:77] wire _entries_T_190; // @[TLB.scala:170:77] wire _entries_T_189; // @[TLB.scala:170:77] wire _entries_T_188; // @[TLB.scala:170:77] wire _entries_T_187; // @[TLB.scala:170:77] wire _entries_T_186; // @[TLB.scala:170:77] wire _entries_T_185; // @[TLB.scala:170:77] wire _entries_T_184; // @[TLB.scala:170:77] wire _entries_T_183; // @[TLB.scala:170:77] wire _entries_T_182; // @[TLB.scala:170:77] wire _entries_T_181; // @[TLB.scala:170:77] wire _entries_T_180; // @[TLB.scala:170:77] wire _entries_T_179; // @[TLB.scala:170:77] wire _entries_T_178; // @[TLB.scala:170:77] wire _entries_T_177; // @[TLB.scala:170:77] wire _entries_T_176; // @[TLB.scala:170:77] wire _entries_T_175; // @[TLB.scala:170:77] wire _entries_T_174; // @[TLB.scala:170:77] wire _entries_T_173; // @[TLB.scala:170:77] wire _entries_T_172; // @[TLB.scala:170:77] wire _entries_T_171; // @[TLB.scala:170:77] wire _entries_T_170; // @[TLB.scala:170:77] wire _entries_T_169; // @[TLB.scala:170:77] wire [3:0][41:0] _GEN_44 = {{sectored_entries_0_7_data_3}, {sectored_entries_0_7_data_2}, {sectored_entries_0_7_data_1}, {sectored_entries_0_7_data_0}}; // @[TLB.scala:170:77, :339:29] wire [41:0] _entries_WIRE_15 = _GEN_44[_entries_T_168]; // @[package.scala:163:13] assign _entries_T_169 = _entries_WIRE_15[0]; // @[TLB.scala:170:77] wire _entries_WIRE_14_fragmented_superpage = _entries_T_169; // @[TLB.scala:170:77] assign _entries_T_170 = _entries_WIRE_15[1]; // @[TLB.scala:170:77] wire _entries_WIRE_14_c = _entries_T_170; // @[TLB.scala:170:77] assign _entries_T_171 = _entries_WIRE_15[2]; // @[TLB.scala:170:77] wire _entries_WIRE_14_eff = _entries_T_171; // @[TLB.scala:170:77] assign _entries_T_172 = _entries_WIRE_15[3]; // @[TLB.scala:170:77] wire _entries_WIRE_14_paa = _entries_T_172; // @[TLB.scala:170:77] assign _entries_T_173 = _entries_WIRE_15[4]; // @[TLB.scala:170:77] wire _entries_WIRE_14_pal = _entries_T_173; // @[TLB.scala:170:77] assign _entries_T_174 = _entries_WIRE_15[5]; // @[TLB.scala:170:77] wire _entries_WIRE_14_ppp = _entries_T_174; // @[TLB.scala:170:77] assign _entries_T_175 = _entries_WIRE_15[6]; // @[TLB.scala:170:77] wire _entries_WIRE_14_pr = _entries_T_175; // @[TLB.scala:170:77] assign _entries_T_176 = _entries_WIRE_15[7]; // @[TLB.scala:170:77] wire _entries_WIRE_14_px = _entries_T_176; // @[TLB.scala:170:77] assign _entries_T_177 = _entries_WIRE_15[8]; // @[TLB.scala:170:77] wire _entries_WIRE_14_pw = _entries_T_177; // @[TLB.scala:170:77] assign _entries_T_178 = _entries_WIRE_15[9]; // @[TLB.scala:170:77] wire _entries_WIRE_14_hr = _entries_T_178; // @[TLB.scala:170:77] assign _entries_T_179 = _entries_WIRE_15[10]; // @[TLB.scala:170:77] wire _entries_WIRE_14_hx = _entries_T_179; // @[TLB.scala:170:77] assign _entries_T_180 = _entries_WIRE_15[11]; // @[TLB.scala:170:77] wire _entries_WIRE_14_hw = _entries_T_180; // @[TLB.scala:170:77] assign _entries_T_181 = _entries_WIRE_15[12]; // @[TLB.scala:170:77] wire _entries_WIRE_14_sr = _entries_T_181; // @[TLB.scala:170:77] assign _entries_T_182 = _entries_WIRE_15[13]; // @[TLB.scala:170:77] wire _entries_WIRE_14_sx = _entries_T_182; // @[TLB.scala:170:77] assign _entries_T_183 = _entries_WIRE_15[14]; // @[TLB.scala:170:77] wire _entries_WIRE_14_sw = _entries_T_183; // @[TLB.scala:170:77] assign _entries_T_184 = _entries_WIRE_15[15]; // @[TLB.scala:170:77] wire _entries_WIRE_14_gf = _entries_T_184; // @[TLB.scala:170:77] assign _entries_T_185 
= _entries_WIRE_15[16]; // @[TLB.scala:170:77] wire _entries_WIRE_14_pf = _entries_T_185; // @[TLB.scala:170:77] assign _entries_T_186 = _entries_WIRE_15[17]; // @[TLB.scala:170:77] wire _entries_WIRE_14_ae_stage2 = _entries_T_186; // @[TLB.scala:170:77] assign _entries_T_187 = _entries_WIRE_15[18]; // @[TLB.scala:170:77] wire _entries_WIRE_14_ae_final = _entries_T_187; // @[TLB.scala:170:77] assign _entries_T_188 = _entries_WIRE_15[19]; // @[TLB.scala:170:77] wire _entries_WIRE_14_ae_ptw = _entries_T_188; // @[TLB.scala:170:77] assign _entries_T_189 = _entries_WIRE_15[20]; // @[TLB.scala:170:77] wire _entries_WIRE_14_g = _entries_T_189; // @[TLB.scala:170:77] assign _entries_T_190 = _entries_WIRE_15[21]; // @[TLB.scala:170:77] wire _entries_WIRE_14_u = _entries_T_190; // @[TLB.scala:170:77] assign _entries_T_191 = _entries_WIRE_15[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_14_ppn = _entries_T_191; // @[TLB.scala:170:77] wire [19:0] _entries_T_214; // @[TLB.scala:170:77] wire _entries_T_213; // @[TLB.scala:170:77] wire _entries_T_212; // @[TLB.scala:170:77] wire _entries_T_211; // @[TLB.scala:170:77] wire _entries_T_210; // @[TLB.scala:170:77] wire _entries_T_209; // @[TLB.scala:170:77] wire _entries_T_208; // @[TLB.scala:170:77] wire _entries_T_207; // @[TLB.scala:170:77] wire _entries_T_206; // @[TLB.scala:170:77] wire _entries_T_205; // @[TLB.scala:170:77] wire _entries_T_204; // @[TLB.scala:170:77] wire _entries_T_203; // @[TLB.scala:170:77] wire _entries_T_202; // @[TLB.scala:170:77] wire _entries_T_201; // @[TLB.scala:170:77] wire _entries_T_200; // @[TLB.scala:170:77] wire _entries_T_199; // @[TLB.scala:170:77] wire _entries_T_198; // @[TLB.scala:170:77] wire _entries_T_197; // @[TLB.scala:170:77] wire _entries_T_196; // @[TLB.scala:170:77] wire _entries_T_195; // @[TLB.scala:170:77] wire _entries_T_194; // @[TLB.scala:170:77] wire _entries_T_193; // @[TLB.scala:170:77] wire _entries_T_192; // @[TLB.scala:170:77] assign _entries_T_192 = _entries_WIRE_17[0]; // @[TLB.scala:170:77] wire _entries_WIRE_16_fragmented_superpage = _entries_T_192; // @[TLB.scala:170:77] assign _entries_T_193 = _entries_WIRE_17[1]; // @[TLB.scala:170:77] wire _entries_WIRE_16_c = _entries_T_193; // @[TLB.scala:170:77] assign _entries_T_194 = _entries_WIRE_17[2]; // @[TLB.scala:170:77] wire _entries_WIRE_16_eff = _entries_T_194; // @[TLB.scala:170:77] assign _entries_T_195 = _entries_WIRE_17[3]; // @[TLB.scala:170:77] wire _entries_WIRE_16_paa = _entries_T_195; // @[TLB.scala:170:77] assign _entries_T_196 = _entries_WIRE_17[4]; // @[TLB.scala:170:77] wire _entries_WIRE_16_pal = _entries_T_196; // @[TLB.scala:170:77] assign _entries_T_197 = _entries_WIRE_17[5]; // @[TLB.scala:170:77] wire _entries_WIRE_16_ppp = _entries_T_197; // @[TLB.scala:170:77] assign _entries_T_198 = _entries_WIRE_17[6]; // @[TLB.scala:170:77] wire _entries_WIRE_16_pr = _entries_T_198; // @[TLB.scala:170:77] assign _entries_T_199 = _entries_WIRE_17[7]; // @[TLB.scala:170:77] wire _entries_WIRE_16_px = _entries_T_199; // @[TLB.scala:170:77] assign _entries_T_200 = _entries_WIRE_17[8]; // @[TLB.scala:170:77] wire _entries_WIRE_16_pw = _entries_T_200; // @[TLB.scala:170:77] assign _entries_T_201 = _entries_WIRE_17[9]; // @[TLB.scala:170:77] wire _entries_WIRE_16_hr = _entries_T_201; // @[TLB.scala:170:77] assign _entries_T_202 = _entries_WIRE_17[10]; // @[TLB.scala:170:77] wire _entries_WIRE_16_hx = _entries_T_202; // @[TLB.scala:170:77] assign _entries_T_203 = _entries_WIRE_17[11]; // @[TLB.scala:170:77] wire 
_entries_WIRE_16_hw = _entries_T_203; // @[TLB.scala:170:77] assign _entries_T_204 = _entries_WIRE_17[12]; // @[TLB.scala:170:77] wire _entries_WIRE_16_sr = _entries_T_204; // @[TLB.scala:170:77] assign _entries_T_205 = _entries_WIRE_17[13]; // @[TLB.scala:170:77] wire _entries_WIRE_16_sx = _entries_T_205; // @[TLB.scala:170:77] assign _entries_T_206 = _entries_WIRE_17[14]; // @[TLB.scala:170:77] wire _entries_WIRE_16_sw = _entries_T_206; // @[TLB.scala:170:77] assign _entries_T_207 = _entries_WIRE_17[15]; // @[TLB.scala:170:77] wire _entries_WIRE_16_gf = _entries_T_207; // @[TLB.scala:170:77] assign _entries_T_208 = _entries_WIRE_17[16]; // @[TLB.scala:170:77] wire _entries_WIRE_16_pf = _entries_T_208; // @[TLB.scala:170:77] assign _entries_T_209 = _entries_WIRE_17[17]; // @[TLB.scala:170:77] wire _entries_WIRE_16_ae_stage2 = _entries_T_209; // @[TLB.scala:170:77] assign _entries_T_210 = _entries_WIRE_17[18]; // @[TLB.scala:170:77] wire _entries_WIRE_16_ae_final = _entries_T_210; // @[TLB.scala:170:77] assign _entries_T_211 = _entries_WIRE_17[19]; // @[TLB.scala:170:77] wire _entries_WIRE_16_ae_ptw = _entries_T_211; // @[TLB.scala:170:77] assign _entries_T_212 = _entries_WIRE_17[20]; // @[TLB.scala:170:77] wire _entries_WIRE_16_g = _entries_T_212; // @[TLB.scala:170:77] assign _entries_T_213 = _entries_WIRE_17[21]; // @[TLB.scala:170:77] wire _entries_WIRE_16_u = _entries_T_213; // @[TLB.scala:170:77] assign _entries_T_214 = _entries_WIRE_17[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_16_ppn = _entries_T_214; // @[TLB.scala:170:77] wire [19:0] _entries_T_237; // @[TLB.scala:170:77] wire _entries_T_236; // @[TLB.scala:170:77] wire _entries_T_235; // @[TLB.scala:170:77] wire _entries_T_234; // @[TLB.scala:170:77] wire _entries_T_233; // @[TLB.scala:170:77] wire _entries_T_232; // @[TLB.scala:170:77] wire _entries_T_231; // @[TLB.scala:170:77] wire _entries_T_230; // @[TLB.scala:170:77] wire _entries_T_229; // @[TLB.scala:170:77] wire _entries_T_228; // @[TLB.scala:170:77] wire _entries_T_227; // @[TLB.scala:170:77] wire _entries_T_226; // @[TLB.scala:170:77] wire _entries_T_225; // @[TLB.scala:170:77] wire _entries_T_224; // @[TLB.scala:170:77] wire _entries_T_223; // @[TLB.scala:170:77] wire _entries_T_222; // @[TLB.scala:170:77] wire _entries_T_221; // @[TLB.scala:170:77] wire _entries_T_220; // @[TLB.scala:170:77] wire _entries_T_219; // @[TLB.scala:170:77] wire _entries_T_218; // @[TLB.scala:170:77] wire _entries_T_217; // @[TLB.scala:170:77] wire _entries_T_216; // @[TLB.scala:170:77] wire _entries_T_215; // @[TLB.scala:170:77] assign _entries_T_215 = _entries_WIRE_19[0]; // @[TLB.scala:170:77] wire _entries_WIRE_18_fragmented_superpage = _entries_T_215; // @[TLB.scala:170:77] assign _entries_T_216 = _entries_WIRE_19[1]; // @[TLB.scala:170:77] wire _entries_WIRE_18_c = _entries_T_216; // @[TLB.scala:170:77] assign _entries_T_217 = _entries_WIRE_19[2]; // @[TLB.scala:170:77] wire _entries_WIRE_18_eff = _entries_T_217; // @[TLB.scala:170:77] assign _entries_T_218 = _entries_WIRE_19[3]; // @[TLB.scala:170:77] wire _entries_WIRE_18_paa = _entries_T_218; // @[TLB.scala:170:77] assign _entries_T_219 = _entries_WIRE_19[4]; // @[TLB.scala:170:77] wire _entries_WIRE_18_pal = _entries_T_219; // @[TLB.scala:170:77] assign _entries_T_220 = _entries_WIRE_19[5]; // @[TLB.scala:170:77] wire _entries_WIRE_18_ppp = _entries_T_220; // @[TLB.scala:170:77] assign _entries_T_221 = _entries_WIRE_19[6]; // @[TLB.scala:170:77] wire _entries_WIRE_18_pr = _entries_T_221; // @[TLB.scala:170:77] 
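// Note: the source vectors _entries_WIRE_17, _19, _21, _23 and _25 read below are driven outside this excerpt; by analogy with the muxed sectored-entry reads above (and the superpage_entries_*/special_entry references in the ppn logic further down), they presumably carry the packed data of the superpage and special entries. This is an inference from context, not confirmed by this excerpt.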
assign _entries_T_222 = _entries_WIRE_19[7]; // @[TLB.scala:170:77] wire _entries_WIRE_18_px = _entries_T_222; // @[TLB.scala:170:77] assign _entries_T_223 = _entries_WIRE_19[8]; // @[TLB.scala:170:77] wire _entries_WIRE_18_pw = _entries_T_223; // @[TLB.scala:170:77] assign _entries_T_224 = _entries_WIRE_19[9]; // @[TLB.scala:170:77] wire _entries_WIRE_18_hr = _entries_T_224; // @[TLB.scala:170:77] assign _entries_T_225 = _entries_WIRE_19[10]; // @[TLB.scala:170:77] wire _entries_WIRE_18_hx = _entries_T_225; // @[TLB.scala:170:77] assign _entries_T_226 = _entries_WIRE_19[11]; // @[TLB.scala:170:77] wire _entries_WIRE_18_hw = _entries_T_226; // @[TLB.scala:170:77] assign _entries_T_227 = _entries_WIRE_19[12]; // @[TLB.scala:170:77] wire _entries_WIRE_18_sr = _entries_T_227; // @[TLB.scala:170:77] assign _entries_T_228 = _entries_WIRE_19[13]; // @[TLB.scala:170:77] wire _entries_WIRE_18_sx = _entries_T_228; // @[TLB.scala:170:77] assign _entries_T_229 = _entries_WIRE_19[14]; // @[TLB.scala:170:77] wire _entries_WIRE_18_sw = _entries_T_229; // @[TLB.scala:170:77] assign _entries_T_230 = _entries_WIRE_19[15]; // @[TLB.scala:170:77] wire _entries_WIRE_18_gf = _entries_T_230; // @[TLB.scala:170:77] assign _entries_T_231 = _entries_WIRE_19[16]; // @[TLB.scala:170:77] wire _entries_WIRE_18_pf = _entries_T_231; // @[TLB.scala:170:77] assign _entries_T_232 = _entries_WIRE_19[17]; // @[TLB.scala:170:77] wire _entries_WIRE_18_ae_stage2 = _entries_T_232; // @[TLB.scala:170:77] assign _entries_T_233 = _entries_WIRE_19[18]; // @[TLB.scala:170:77] wire _entries_WIRE_18_ae_final = _entries_T_233; // @[TLB.scala:170:77] assign _entries_T_234 = _entries_WIRE_19[19]; // @[TLB.scala:170:77] wire _entries_WIRE_18_ae_ptw = _entries_T_234; // @[TLB.scala:170:77] assign _entries_T_235 = _entries_WIRE_19[20]; // @[TLB.scala:170:77] wire _entries_WIRE_18_g = _entries_T_235; // @[TLB.scala:170:77] assign _entries_T_236 = _entries_WIRE_19[21]; // @[TLB.scala:170:77] wire _entries_WIRE_18_u = _entries_T_236; // @[TLB.scala:170:77] assign _entries_T_237 = _entries_WIRE_19[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_18_ppn = _entries_T_237; // @[TLB.scala:170:77] wire [19:0] _entries_T_260; // @[TLB.scala:170:77] wire _entries_T_259; // @[TLB.scala:170:77] wire _entries_T_258; // @[TLB.scala:170:77] wire _entries_T_257; // @[TLB.scala:170:77] wire _entries_T_256; // @[TLB.scala:170:77] wire _entries_T_255; // @[TLB.scala:170:77] wire _entries_T_254; // @[TLB.scala:170:77] wire _entries_T_253; // @[TLB.scala:170:77] wire _entries_T_252; // @[TLB.scala:170:77] wire _entries_T_251; // @[TLB.scala:170:77] wire _entries_T_250; // @[TLB.scala:170:77] wire _entries_T_249; // @[TLB.scala:170:77] wire _entries_T_248; // @[TLB.scala:170:77] wire _entries_T_247; // @[TLB.scala:170:77] wire _entries_T_246; // @[TLB.scala:170:77] wire _entries_T_245; // @[TLB.scala:170:77] wire _entries_T_244; // @[TLB.scala:170:77] wire _entries_T_243; // @[TLB.scala:170:77] wire _entries_T_242; // @[TLB.scala:170:77] wire _entries_T_241; // @[TLB.scala:170:77] wire _entries_T_240; // @[TLB.scala:170:77] wire _entries_T_239; // @[TLB.scala:170:77] wire _entries_T_238; // @[TLB.scala:170:77] assign _entries_T_238 = _entries_WIRE_21[0]; // @[TLB.scala:170:77] wire _entries_WIRE_20_fragmented_superpage = _entries_T_238; // @[TLB.scala:170:77] assign _entries_T_239 = _entries_WIRE_21[1]; // @[TLB.scala:170:77] wire _entries_WIRE_20_c = _entries_T_239; // @[TLB.scala:170:77] assign _entries_T_240 = _entries_WIRE_21[2]; // @[TLB.scala:170:77] 
wire _entries_WIRE_20_eff = _entries_T_240; // @[TLB.scala:170:77] assign _entries_T_241 = _entries_WIRE_21[3]; // @[TLB.scala:170:77] wire _entries_WIRE_20_paa = _entries_T_241; // @[TLB.scala:170:77] assign _entries_T_242 = _entries_WIRE_21[4]; // @[TLB.scala:170:77] wire _entries_WIRE_20_pal = _entries_T_242; // @[TLB.scala:170:77] assign _entries_T_243 = _entries_WIRE_21[5]; // @[TLB.scala:170:77] wire _entries_WIRE_20_ppp = _entries_T_243; // @[TLB.scala:170:77] assign _entries_T_244 = _entries_WIRE_21[6]; // @[TLB.scala:170:77] wire _entries_WIRE_20_pr = _entries_T_244; // @[TLB.scala:170:77] assign _entries_T_245 = _entries_WIRE_21[7]; // @[TLB.scala:170:77] wire _entries_WIRE_20_px = _entries_T_245; // @[TLB.scala:170:77] assign _entries_T_246 = _entries_WIRE_21[8]; // @[TLB.scala:170:77] wire _entries_WIRE_20_pw = _entries_T_246; // @[TLB.scala:170:77] assign _entries_T_247 = _entries_WIRE_21[9]; // @[TLB.scala:170:77] wire _entries_WIRE_20_hr = _entries_T_247; // @[TLB.scala:170:77] assign _entries_T_248 = _entries_WIRE_21[10]; // @[TLB.scala:170:77] wire _entries_WIRE_20_hx = _entries_T_248; // @[TLB.scala:170:77] assign _entries_T_249 = _entries_WIRE_21[11]; // @[TLB.scala:170:77] wire _entries_WIRE_20_hw = _entries_T_249; // @[TLB.scala:170:77] assign _entries_T_250 = _entries_WIRE_21[12]; // @[TLB.scala:170:77] wire _entries_WIRE_20_sr = _entries_T_250; // @[TLB.scala:170:77] assign _entries_T_251 = _entries_WIRE_21[13]; // @[TLB.scala:170:77] wire _entries_WIRE_20_sx = _entries_T_251; // @[TLB.scala:170:77] assign _entries_T_252 = _entries_WIRE_21[14]; // @[TLB.scala:170:77] wire _entries_WIRE_20_sw = _entries_T_252; // @[TLB.scala:170:77] assign _entries_T_253 = _entries_WIRE_21[15]; // @[TLB.scala:170:77] wire _entries_WIRE_20_gf = _entries_T_253; // @[TLB.scala:170:77] assign _entries_T_254 = _entries_WIRE_21[16]; // @[TLB.scala:170:77] wire _entries_WIRE_20_pf = _entries_T_254; // @[TLB.scala:170:77] assign _entries_T_255 = _entries_WIRE_21[17]; // @[TLB.scala:170:77] wire _entries_WIRE_20_ae_stage2 = _entries_T_255; // @[TLB.scala:170:77] assign _entries_T_256 = _entries_WIRE_21[18]; // @[TLB.scala:170:77] wire _entries_WIRE_20_ae_final = _entries_T_256; // @[TLB.scala:170:77] assign _entries_T_257 = _entries_WIRE_21[19]; // @[TLB.scala:170:77] wire _entries_WIRE_20_ae_ptw = _entries_T_257; // @[TLB.scala:170:77] assign _entries_T_258 = _entries_WIRE_21[20]; // @[TLB.scala:170:77] wire _entries_WIRE_20_g = _entries_T_258; // @[TLB.scala:170:77] assign _entries_T_259 = _entries_WIRE_21[21]; // @[TLB.scala:170:77] wire _entries_WIRE_20_u = _entries_T_259; // @[TLB.scala:170:77] assign _entries_T_260 = _entries_WIRE_21[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_20_ppn = _entries_T_260; // @[TLB.scala:170:77] wire [19:0] _entries_T_283; // @[TLB.scala:170:77] wire _entries_T_282; // @[TLB.scala:170:77] wire _entries_T_281; // @[TLB.scala:170:77] wire _entries_T_280; // @[TLB.scala:170:77] wire _entries_T_279; // @[TLB.scala:170:77] wire _entries_T_278; // @[TLB.scala:170:77] wire _entries_T_277; // @[TLB.scala:170:77] wire _entries_T_276; // @[TLB.scala:170:77] wire _entries_T_275; // @[TLB.scala:170:77] wire _entries_T_274; // @[TLB.scala:170:77] wire _entries_T_273; // @[TLB.scala:170:77] wire _entries_T_272; // @[TLB.scala:170:77] wire _entries_T_271; // @[TLB.scala:170:77] wire _entries_T_270; // @[TLB.scala:170:77] wire _entries_T_269; // @[TLB.scala:170:77] wire _entries_T_268; // @[TLB.scala:170:77] wire _entries_T_267; // @[TLB.scala:170:77] wire 
_entries_T_266; // @[TLB.scala:170:77] wire _entries_T_265; // @[TLB.scala:170:77] wire _entries_T_264; // @[TLB.scala:170:77] wire _entries_T_263; // @[TLB.scala:170:77] wire _entries_T_262; // @[TLB.scala:170:77] wire _entries_T_261; // @[TLB.scala:170:77] assign _entries_T_261 = _entries_WIRE_23[0]; // @[TLB.scala:170:77] wire _entries_WIRE_22_fragmented_superpage = _entries_T_261; // @[TLB.scala:170:77] assign _entries_T_262 = _entries_WIRE_23[1]; // @[TLB.scala:170:77] wire _entries_WIRE_22_c = _entries_T_262; // @[TLB.scala:170:77] assign _entries_T_263 = _entries_WIRE_23[2]; // @[TLB.scala:170:77] wire _entries_WIRE_22_eff = _entries_T_263; // @[TLB.scala:170:77] assign _entries_T_264 = _entries_WIRE_23[3]; // @[TLB.scala:170:77] wire _entries_WIRE_22_paa = _entries_T_264; // @[TLB.scala:170:77] assign _entries_T_265 = _entries_WIRE_23[4]; // @[TLB.scala:170:77] wire _entries_WIRE_22_pal = _entries_T_265; // @[TLB.scala:170:77] assign _entries_T_266 = _entries_WIRE_23[5]; // @[TLB.scala:170:77] wire _entries_WIRE_22_ppp = _entries_T_266; // @[TLB.scala:170:77] assign _entries_T_267 = _entries_WIRE_23[6]; // @[TLB.scala:170:77] wire _entries_WIRE_22_pr = _entries_T_267; // @[TLB.scala:170:77] assign _entries_T_268 = _entries_WIRE_23[7]; // @[TLB.scala:170:77] wire _entries_WIRE_22_px = _entries_T_268; // @[TLB.scala:170:77] assign _entries_T_269 = _entries_WIRE_23[8]; // @[TLB.scala:170:77] wire _entries_WIRE_22_pw = _entries_T_269; // @[TLB.scala:170:77] assign _entries_T_270 = _entries_WIRE_23[9]; // @[TLB.scala:170:77] wire _entries_WIRE_22_hr = _entries_T_270; // @[TLB.scala:170:77] assign _entries_T_271 = _entries_WIRE_23[10]; // @[TLB.scala:170:77] wire _entries_WIRE_22_hx = _entries_T_271; // @[TLB.scala:170:77] assign _entries_T_272 = _entries_WIRE_23[11]; // @[TLB.scala:170:77] wire _entries_WIRE_22_hw = _entries_T_272; // @[TLB.scala:170:77] assign _entries_T_273 = _entries_WIRE_23[12]; // @[TLB.scala:170:77] wire _entries_WIRE_22_sr = _entries_T_273; // @[TLB.scala:170:77] assign _entries_T_274 = _entries_WIRE_23[13]; // @[TLB.scala:170:77] wire _entries_WIRE_22_sx = _entries_T_274; // @[TLB.scala:170:77] assign _entries_T_275 = _entries_WIRE_23[14]; // @[TLB.scala:170:77] wire _entries_WIRE_22_sw = _entries_T_275; // @[TLB.scala:170:77] assign _entries_T_276 = _entries_WIRE_23[15]; // @[TLB.scala:170:77] wire _entries_WIRE_22_gf = _entries_T_276; // @[TLB.scala:170:77] assign _entries_T_277 = _entries_WIRE_23[16]; // @[TLB.scala:170:77] wire _entries_WIRE_22_pf = _entries_T_277; // @[TLB.scala:170:77] assign _entries_T_278 = _entries_WIRE_23[17]; // @[TLB.scala:170:77] wire _entries_WIRE_22_ae_stage2 = _entries_T_278; // @[TLB.scala:170:77] assign _entries_T_279 = _entries_WIRE_23[18]; // @[TLB.scala:170:77] wire _entries_WIRE_22_ae_final = _entries_T_279; // @[TLB.scala:170:77] assign _entries_T_280 = _entries_WIRE_23[19]; // @[TLB.scala:170:77] wire _entries_WIRE_22_ae_ptw = _entries_T_280; // @[TLB.scala:170:77] assign _entries_T_281 = _entries_WIRE_23[20]; // @[TLB.scala:170:77] wire _entries_WIRE_22_g = _entries_T_281; // @[TLB.scala:170:77] assign _entries_T_282 = _entries_WIRE_23[21]; // @[TLB.scala:170:77] wire _entries_WIRE_22_u = _entries_T_282; // @[TLB.scala:170:77] assign _entries_T_283 = _entries_WIRE_23[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_22_ppn = _entries_T_283; // @[TLB.scala:170:77] wire [19:0] _entries_T_306; // @[TLB.scala:170:77] wire _entries_T_305; // @[TLB.scala:170:77] wire _entries_T_304; // @[TLB.scala:170:77] wire 
_entries_T_303; // @[TLB.scala:170:77] wire _entries_T_302; // @[TLB.scala:170:77] wire _entries_T_301; // @[TLB.scala:170:77] wire _entries_T_300; // @[TLB.scala:170:77] wire _entries_T_299; // @[TLB.scala:170:77] wire _entries_T_298; // @[TLB.scala:170:77] wire _entries_T_297; // @[TLB.scala:170:77] wire _entries_T_296; // @[TLB.scala:170:77] wire _entries_T_295; // @[TLB.scala:170:77] wire _entries_T_294; // @[TLB.scala:170:77] wire _entries_T_293; // @[TLB.scala:170:77] wire _entries_T_292; // @[TLB.scala:170:77] wire _entries_T_291; // @[TLB.scala:170:77] wire _entries_T_290; // @[TLB.scala:170:77] wire _entries_T_289; // @[TLB.scala:170:77] wire _entries_T_288; // @[TLB.scala:170:77] wire _entries_T_287; // @[TLB.scala:170:77] wire _entries_T_286; // @[TLB.scala:170:77] wire _entries_T_285; // @[TLB.scala:170:77] wire _entries_T_284; // @[TLB.scala:170:77] assign _entries_T_284 = _entries_WIRE_25[0]; // @[TLB.scala:170:77] wire _entries_WIRE_24_fragmented_superpage = _entries_T_284; // @[TLB.scala:170:77] assign _entries_T_285 = _entries_WIRE_25[1]; // @[TLB.scala:170:77] wire _entries_WIRE_24_c = _entries_T_285; // @[TLB.scala:170:77] assign _entries_T_286 = _entries_WIRE_25[2]; // @[TLB.scala:170:77] wire _entries_WIRE_24_eff = _entries_T_286; // @[TLB.scala:170:77] assign _entries_T_287 = _entries_WIRE_25[3]; // @[TLB.scala:170:77] wire _entries_WIRE_24_paa = _entries_T_287; // @[TLB.scala:170:77] assign _entries_T_288 = _entries_WIRE_25[4]; // @[TLB.scala:170:77] wire _entries_WIRE_24_pal = _entries_T_288; // @[TLB.scala:170:77] assign _entries_T_289 = _entries_WIRE_25[5]; // @[TLB.scala:170:77] wire _entries_WIRE_24_ppp = _entries_T_289; // @[TLB.scala:170:77] assign _entries_T_290 = _entries_WIRE_25[6]; // @[TLB.scala:170:77] wire _entries_WIRE_24_pr = _entries_T_290; // @[TLB.scala:170:77] assign _entries_T_291 = _entries_WIRE_25[7]; // @[TLB.scala:170:77] wire _entries_WIRE_24_px = _entries_T_291; // @[TLB.scala:170:77] assign _entries_T_292 = _entries_WIRE_25[8]; // @[TLB.scala:170:77] wire _entries_WIRE_24_pw = _entries_T_292; // @[TLB.scala:170:77] assign _entries_T_293 = _entries_WIRE_25[9]; // @[TLB.scala:170:77] wire _entries_WIRE_24_hr = _entries_T_293; // @[TLB.scala:170:77] assign _entries_T_294 = _entries_WIRE_25[10]; // @[TLB.scala:170:77] wire _entries_WIRE_24_hx = _entries_T_294; // @[TLB.scala:170:77] assign _entries_T_295 = _entries_WIRE_25[11]; // @[TLB.scala:170:77] wire _entries_WIRE_24_hw = _entries_T_295; // @[TLB.scala:170:77] assign _entries_T_296 = _entries_WIRE_25[12]; // @[TLB.scala:170:77] wire _entries_WIRE_24_sr = _entries_T_296; // @[TLB.scala:170:77] assign _entries_T_297 = _entries_WIRE_25[13]; // @[TLB.scala:170:77] wire _entries_WIRE_24_sx = _entries_T_297; // @[TLB.scala:170:77] assign _entries_T_298 = _entries_WIRE_25[14]; // @[TLB.scala:170:77] wire _entries_WIRE_24_sw = _entries_T_298; // @[TLB.scala:170:77] assign _entries_T_299 = _entries_WIRE_25[15]; // @[TLB.scala:170:77] wire _entries_WIRE_24_gf = _entries_T_299; // @[TLB.scala:170:77] assign _entries_T_300 = _entries_WIRE_25[16]; // @[TLB.scala:170:77] wire _entries_WIRE_24_pf = _entries_T_300; // @[TLB.scala:170:77] assign _entries_T_301 = _entries_WIRE_25[17]; // @[TLB.scala:170:77] wire _entries_WIRE_24_ae_stage2 = _entries_T_301; // @[TLB.scala:170:77] assign _entries_T_302 = _entries_WIRE_25[18]; // @[TLB.scala:170:77] wire _entries_WIRE_24_ae_final = _entries_T_302; // @[TLB.scala:170:77] assign _entries_T_303 = _entries_WIRE_25[19]; // @[TLB.scala:170:77] wire 
_entries_WIRE_24_ae_ptw = _entries_T_303; // @[TLB.scala:170:77] assign _entries_T_304 = _entries_WIRE_25[20]; // @[TLB.scala:170:77] wire _entries_WIRE_24_g = _entries_T_304; // @[TLB.scala:170:77] assign _entries_T_305 = _entries_WIRE_25[21]; // @[TLB.scala:170:77] wire _entries_WIRE_24_u = _entries_T_305; // @[TLB.scala:170:77] assign _entries_T_306 = _entries_WIRE_25[41:22]; // @[TLB.scala:170:77] wire [19:0] _entries_WIRE_24_ppn = _entries_T_306; // @[TLB.scala:170:77] wire _ppn_T = ~vm_enabled; // @[TLB.scala:399:61, :442:18, :502:30] wire ppn_ignore = _ppn_ignore_T; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_1 = ppn_ignore ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_2 = {_ppn_T_1[35:20], _ppn_T_1[19:0] | _entries_barrier_8_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_3 = _ppn_T_2[26:18]; // @[TLB.scala:198:{47,58}] wire [9:0] _ppn_T_4 = {1'h0, _ppn_T_3}; // @[TLB.scala:198:{18,58}] wire _ppn_ignore_T_1 = ~(superpage_entries_0_level[1]); // @[TLB.scala:182:28, :197:28, :341:30] wire ppn_ignore_1 = _ppn_ignore_T_1; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_5 = ppn_ignore_1 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_6 = {_ppn_T_5[35:20], _ppn_T_5[19:0] | _entries_barrier_8_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_7 = _ppn_T_6[17:9]; // @[TLB.scala:198:{47,58}] wire [18:0] _ppn_T_8 = {_ppn_T_4, _ppn_T_7}; // @[TLB.scala:198:{18,58}] wire [35:0] _ppn_T_10 = {_ppn_T_9[35:20], _ppn_T_9[19:0] | _entries_barrier_8_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_11 = _ppn_T_10[8:0]; // @[TLB.scala:198:{47,58}] wire [27:0] _ppn_T_12 = {_ppn_T_8, _ppn_T_11}; // @[TLB.scala:198:{18,58}] wire ppn_ignore_3 = _ppn_ignore_T_3; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_13 = ppn_ignore_3 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_14 = {_ppn_T_13[35:20], _ppn_T_13[19:0] | _entries_barrier_9_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_15 = _ppn_T_14[26:18]; // @[TLB.scala:198:{47,58}] wire [9:0] _ppn_T_16 = {1'h0, _ppn_T_15}; // @[TLB.scala:198:{18,58}] wire _ppn_ignore_T_4 = ~(superpage_entries_1_level[1]); // @[TLB.scala:182:28, :197:28, :341:30] wire ppn_ignore_4 = _ppn_ignore_T_4; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_17 = ppn_ignore_4 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_18 = {_ppn_T_17[35:20], _ppn_T_17[19:0] | _entries_barrier_9_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_19 = _ppn_T_18[17:9]; // @[TLB.scala:198:{47,58}] wire [18:0] _ppn_T_20 = {_ppn_T_16, _ppn_T_19}; // @[TLB.scala:198:{18,58}] wire [35:0] _ppn_T_22 = {_ppn_T_21[35:20], _ppn_T_21[19:0] | _entries_barrier_9_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_23 = _ppn_T_22[8:0]; // @[TLB.scala:198:{47,58}] wire [27:0] _ppn_T_24 = {_ppn_T_20, _ppn_T_23}; // @[TLB.scala:198:{18,58}] wire ppn_ignore_6 = _ppn_ignore_T_6; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_25 = ppn_ignore_6 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_26 = {_ppn_T_25[35:20], _ppn_T_25[19:0] | _entries_barrier_10_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_27 = _ppn_T_26[26:18]; // @[TLB.scala:198:{47,58}] wire [9:0] _ppn_T_28 = {1'h0, _ppn_T_27}; // @[TLB.scala:198:{18,58}] wire _ppn_ignore_T_7 = ~(superpage_entries_2_level[1]); // @[TLB.scala:182:28, :197:28, :341:30] wire ppn_ignore_7 = _ppn_ignore_T_7; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_29 = ppn_ignore_7 ? 
vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_30 = {_ppn_T_29[35:20], _ppn_T_29[19:0] | _entries_barrier_10_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_31 = _ppn_T_30[17:9]; // @[TLB.scala:198:{47,58}] wire [18:0] _ppn_T_32 = {_ppn_T_28, _ppn_T_31}; // @[TLB.scala:198:{18,58}] wire [35:0] _ppn_T_34 = {_ppn_T_33[35:20], _ppn_T_33[19:0] | _entries_barrier_10_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_35 = _ppn_T_34[8:0]; // @[TLB.scala:198:{47,58}] wire [27:0] _ppn_T_36 = {_ppn_T_32, _ppn_T_35}; // @[TLB.scala:198:{18,58}] wire ppn_ignore_9 = _ppn_ignore_T_9; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_37 = ppn_ignore_9 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_38 = {_ppn_T_37[35:20], _ppn_T_37[19:0] | _entries_barrier_11_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_39 = _ppn_T_38[26:18]; // @[TLB.scala:198:{47,58}] wire [9:0] _ppn_T_40 = {1'h0, _ppn_T_39}; // @[TLB.scala:198:{18,58}] wire _ppn_ignore_T_10 = ~(superpage_entries_3_level[1]); // @[TLB.scala:182:28, :197:28, :341:30] wire ppn_ignore_10 = _ppn_ignore_T_10; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_41 = ppn_ignore_10 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_42 = {_ppn_T_41[35:20], _ppn_T_41[19:0] | _entries_barrier_11_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_43 = _ppn_T_42[17:9]; // @[TLB.scala:198:{47,58}] wire [18:0] _ppn_T_44 = {_ppn_T_40, _ppn_T_43}; // @[TLB.scala:198:{18,58}] wire [35:0] _ppn_T_46 = {_ppn_T_45[35:20], _ppn_T_45[19:0] | _entries_barrier_11_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_47 = _ppn_T_46[8:0]; // @[TLB.scala:198:{47,58}] wire [27:0] _ppn_T_48 = {_ppn_T_44, _ppn_T_47}; // @[TLB.scala:198:{18,58}] wire ppn_ignore_12 = _ppn_ignore_T_12; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_49 = ppn_ignore_12 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_50 = {_ppn_T_49[35:20], _ppn_T_49[19:0] | _entries_barrier_12_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_51 = _ppn_T_50[26:18]; // @[TLB.scala:198:{47,58}] wire [9:0] _ppn_T_52 = {1'h0, _ppn_T_51}; // @[TLB.scala:198:{18,58}] wire _ppn_ignore_T_13 = ~(special_entry_level[1]); // @[TLB.scala:197:28, :346:56] wire ppn_ignore_13 = _ppn_ignore_T_13; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_53 = ppn_ignore_13 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_54 = {_ppn_T_53[35:20], _ppn_T_53[19:0] | _entries_barrier_12_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_55 = _ppn_T_54[17:9]; // @[TLB.scala:198:{47,58}] wire [18:0] _ppn_T_56 = {_ppn_T_52, _ppn_T_55}; // @[TLB.scala:198:{18,58}] wire ppn_ignore_14 = _ppn_ignore_T_14; // @[TLB.scala:197:{28,34}] wire [35:0] _ppn_T_57 = ppn_ignore_14 ? vpn : 36'h0; // @[TLB.scala:197:34, :198:28, :335:30] wire [35:0] _ppn_T_58 = {_ppn_T_57[35:20], _ppn_T_57[19:0] | _entries_barrier_12_io_y_ppn}; // @[package.scala:267:25] wire [8:0] _ppn_T_59 = _ppn_T_58[8:0]; // @[TLB.scala:198:{47,58}] wire [27:0] _ppn_T_60 = {_ppn_T_56, _ppn_T_59}; // @[TLB.scala:198:{18,58}] wire [19:0] _ppn_T_61 = vpn[19:0]; // @[TLB.scala:335:30, :502:125] wire [19:0] _ppn_T_62 = hitsVec_0 ? _entries_barrier_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_63 = hitsVec_1 ? _entries_barrier_1_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_64 = hitsVec_2 ? _entries_barrier_2_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_65 = hitsVec_3 ? 
_entries_barrier_3_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_66 = hitsVec_4 ? _entries_barrier_4_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_67 = hitsVec_5 ? _entries_barrier_5_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_68 = hitsVec_6 ? _entries_barrier_6_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_69 = hitsVec_7 ? _entries_barrier_7_io_y_ppn : 20'h0; // @[Mux.scala:30:73] wire [27:0] _ppn_T_70 = hitsVec_8 ? _ppn_T_12 : 28'h0; // @[Mux.scala:30:73] wire [27:0] _ppn_T_71 = hitsVec_9 ? _ppn_T_24 : 28'h0; // @[Mux.scala:30:73] wire [27:0] _ppn_T_72 = hitsVec_10 ? _ppn_T_36 : 28'h0; // @[Mux.scala:30:73] wire [27:0] _ppn_T_73 = hitsVec_11 ? _ppn_T_48 : 28'h0; // @[Mux.scala:30:73] wire [27:0] _ppn_T_74 = hitsVec_12 ? _ppn_T_60 : 28'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_75 = _ppn_T ? _ppn_T_61 : 20'h0; // @[Mux.scala:30:73] wire [19:0] _ppn_T_76 = _ppn_T_62 | _ppn_T_63; // @[Mux.scala:30:73] wire [19:0] _ppn_T_77 = _ppn_T_76 | _ppn_T_64; // @[Mux.scala:30:73] wire [19:0] _ppn_T_78 = _ppn_T_77 | _ppn_T_65; // @[Mux.scala:30:73] wire [19:0] _ppn_T_79 = _ppn_T_78 | _ppn_T_66; // @[Mux.scala:30:73] wire [19:0] _ppn_T_80 = _ppn_T_79 | _ppn_T_67; // @[Mux.scala:30:73] wire [19:0] _ppn_T_81 = _ppn_T_80 | _ppn_T_68; // @[Mux.scala:30:73] wire [19:0] _ppn_T_82 = _ppn_T_81 | _ppn_T_69; // @[Mux.scala:30:73] wire [27:0] _ppn_T_83 = {8'h0, _ppn_T_82} | _ppn_T_70; // @[Mux.scala:30:73] wire [27:0] _ppn_T_84 = _ppn_T_83 | _ppn_T_71; // @[Mux.scala:30:73] wire [27:0] _ppn_T_85 = _ppn_T_84 | _ppn_T_72; // @[Mux.scala:30:73] wire [27:0] _ppn_T_86 = _ppn_T_85 | _ppn_T_73; // @[Mux.scala:30:73] wire [27:0] _ppn_T_87 = _ppn_T_86 | _ppn_T_74; // @[Mux.scala:30:73] wire [27:0] _ppn_T_88 = {_ppn_T_87[27:20], _ppn_T_87[19:0] | _ppn_T_75}; // @[Mux.scala:30:73] wire [26:0] ppn; // @[Mux.scala:30:73] assign ppn = _ppn_T_88[26:0]; // @[Mux.scala:30:73] wire [1:0] ptw_ae_array_lo_lo_hi = {_entries_barrier_2_io_y_ae_ptw, _entries_barrier_1_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_ae_array_lo_lo = {ptw_ae_array_lo_lo_hi, _entries_barrier_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_ae_array_lo_hi_hi = {_entries_barrier_5_io_y_ae_ptw, _entries_barrier_4_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_ae_array_lo_hi = {ptw_ae_array_lo_hi_hi, _entries_barrier_3_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [5:0] ptw_ae_array_lo = {ptw_ae_array_lo_hi, ptw_ae_array_lo_lo}; // @[package.scala:45:27] wire [1:0] ptw_ae_array_hi_lo_hi = {_entries_barrier_8_io_y_ae_ptw, _entries_barrier_7_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_ae_array_hi_lo = {ptw_ae_array_hi_lo_hi, _entries_barrier_6_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_ae_array_hi_hi_lo = {_entries_barrier_10_io_y_ae_ptw, _entries_barrier_9_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_ae_array_hi_hi_hi = {_entries_barrier_12_io_y_ae_ptw, _entries_barrier_11_io_y_ae_ptw}; // @[package.scala:45:27, :267:25] wire [3:0] ptw_ae_array_hi_hi = {ptw_ae_array_hi_hi_hi, ptw_ae_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] ptw_ae_array_hi = {ptw_ae_array_hi_hi, ptw_ae_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _ptw_ae_array_T = {ptw_ae_array_hi, ptw_ae_array_lo}; // @[package.scala:45:27] wire [13:0] ptw_ae_array = {1'h0, _ptw_ae_array_T}; // @[package.scala:45:27] wire [1:0] final_ae_array_lo_lo_hi = {_entries_barrier_2_io_y_ae_final, _entries_barrier_1_io_y_ae_final}; 
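// The fault vectors gathered here (ptw_ae_array, final_ae_array, ptw_pf_array, ptw_gf_array) pack one
// flag bit per TLB entry as a {hi, lo} concatenation of the thirteen entries, zero-extended to 14 bits
// so they line up with the permission vectors computed further below.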
// @[package.scala:45:27, :267:25] wire [2:0] final_ae_array_lo_lo = {final_ae_array_lo_lo_hi, _entries_barrier_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [1:0] final_ae_array_lo_hi_hi = {_entries_barrier_5_io_y_ae_final, _entries_barrier_4_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [2:0] final_ae_array_lo_hi = {final_ae_array_lo_hi_hi, _entries_barrier_3_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [5:0] final_ae_array_lo = {final_ae_array_lo_hi, final_ae_array_lo_lo}; // @[package.scala:45:27] wire [1:0] final_ae_array_hi_lo_hi = {_entries_barrier_8_io_y_ae_final, _entries_barrier_7_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [2:0] final_ae_array_hi_lo = {final_ae_array_hi_lo_hi, _entries_barrier_6_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [1:0] final_ae_array_hi_hi_lo = {_entries_barrier_10_io_y_ae_final, _entries_barrier_9_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [1:0] final_ae_array_hi_hi_hi = {_entries_barrier_12_io_y_ae_final, _entries_barrier_11_io_y_ae_final}; // @[package.scala:45:27, :267:25] wire [3:0] final_ae_array_hi_hi = {final_ae_array_hi_hi_hi, final_ae_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] final_ae_array_hi = {final_ae_array_hi_hi, final_ae_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _final_ae_array_T = {final_ae_array_hi, final_ae_array_lo}; // @[package.scala:45:27] wire [13:0] final_ae_array = {1'h0, _final_ae_array_T}; // @[package.scala:45:27] wire [1:0] ptw_pf_array_lo_lo_hi = {_entries_barrier_2_io_y_pf, _entries_barrier_1_io_y_pf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_pf_array_lo_lo = {ptw_pf_array_lo_lo_hi, _entries_barrier_io_y_pf}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_pf_array_lo_hi_hi = {_entries_barrier_5_io_y_pf, _entries_barrier_4_io_y_pf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_pf_array_lo_hi = {ptw_pf_array_lo_hi_hi, _entries_barrier_3_io_y_pf}; // @[package.scala:45:27, :267:25] wire [5:0] ptw_pf_array_lo = {ptw_pf_array_lo_hi, ptw_pf_array_lo_lo}; // @[package.scala:45:27] wire [1:0] ptw_pf_array_hi_lo_hi = {_entries_barrier_8_io_y_pf, _entries_barrier_7_io_y_pf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_pf_array_hi_lo = {ptw_pf_array_hi_lo_hi, _entries_barrier_6_io_y_pf}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_pf_array_hi_hi_lo = {_entries_barrier_10_io_y_pf, _entries_barrier_9_io_y_pf}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_pf_array_hi_hi_hi = {_entries_barrier_12_io_y_pf, _entries_barrier_11_io_y_pf}; // @[package.scala:45:27, :267:25] wire [3:0] ptw_pf_array_hi_hi = {ptw_pf_array_hi_hi_hi, ptw_pf_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] ptw_pf_array_hi = {ptw_pf_array_hi_hi, ptw_pf_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _ptw_pf_array_T = {ptw_pf_array_hi, ptw_pf_array_lo}; // @[package.scala:45:27] wire [13:0] ptw_pf_array = {1'h0, _ptw_pf_array_T}; // @[package.scala:45:27] wire [1:0] ptw_gf_array_lo_lo_hi = {_entries_barrier_2_io_y_gf, _entries_barrier_1_io_y_gf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_gf_array_lo_lo = {ptw_gf_array_lo_lo_hi, _entries_barrier_io_y_gf}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_gf_array_lo_hi_hi = {_entries_barrier_5_io_y_gf, _entries_barrier_4_io_y_gf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_gf_array_lo_hi = {ptw_gf_array_lo_hi_hi, _entries_barrier_3_io_y_gf}; // @[package.scala:45:27, :267:25] wire [5:0] ptw_gf_array_lo = {ptw_gf_array_lo_hi, ptw_gf_array_lo_lo}; // @[package.scala:45:27] 
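// Privilege filtering: priv_rw_ok selects entries whose pages are data-accessible at the current
// privilege (user pages when not in S-mode or when SUM is set, supervisor pages when in S-mode);
// priv_x_ok is the analogous filter for instruction fetch. These mask the per-entry sr/sw/sx bits
// to form r_array, w_array and x_array (MXR additionally folds the sx bits into r_array).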
wire [1:0] ptw_gf_array_hi_lo_hi = {_entries_barrier_8_io_y_gf, _entries_barrier_7_io_y_gf}; // @[package.scala:45:27, :267:25] wire [2:0] ptw_gf_array_hi_lo = {ptw_gf_array_hi_lo_hi, _entries_barrier_6_io_y_gf}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_gf_array_hi_hi_lo = {_entries_barrier_10_io_y_gf, _entries_barrier_9_io_y_gf}; // @[package.scala:45:27, :267:25] wire [1:0] ptw_gf_array_hi_hi_hi = {_entries_barrier_12_io_y_gf, _entries_barrier_11_io_y_gf}; // @[package.scala:45:27, :267:25] wire [3:0] ptw_gf_array_hi_hi = {ptw_gf_array_hi_hi_hi, ptw_gf_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] ptw_gf_array_hi = {ptw_gf_array_hi_hi, ptw_gf_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _ptw_gf_array_T = {ptw_gf_array_hi, ptw_gf_array_lo}; // @[package.scala:45:27] wire [13:0] ptw_gf_array = {1'h0, _ptw_gf_array_T}; // @[package.scala:45:27] wire [13:0] _gf_ld_array_T_3 = ptw_gf_array; // @[TLB.scala:509:25, :600:82] wire [13:0] _gf_st_array_T_2 = ptw_gf_array; // @[TLB.scala:509:25, :601:63] wire [13:0] _gf_inst_array_T_1 = ptw_gf_array; // @[TLB.scala:509:25, :602:46] wire _priv_rw_ok_T = ~priv_s; // @[TLB.scala:370:20, :513:24] wire _priv_rw_ok_T_1 = _priv_rw_ok_T | sum; // @[TLB.scala:510:16, :513:{24,32}] wire [1:0] _GEN_45 = {_entries_barrier_2_io_y_u, _entries_barrier_1_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] priv_rw_ok_lo_lo_hi; // @[package.scala:45:27] assign priv_rw_ok_lo_lo_hi = _GEN_45; // @[package.scala:45:27] wire [1:0] priv_rw_ok_lo_lo_hi_1; // @[package.scala:45:27] assign priv_rw_ok_lo_lo_hi_1 = _GEN_45; // @[package.scala:45:27] wire [1:0] priv_x_ok_lo_lo_hi; // @[package.scala:45:27] assign priv_x_ok_lo_lo_hi = _GEN_45; // @[package.scala:45:27] wire [1:0] priv_x_ok_lo_lo_hi_1; // @[package.scala:45:27] assign priv_x_ok_lo_lo_hi_1 = _GEN_45; // @[package.scala:45:27] wire [2:0] priv_rw_ok_lo_lo = {priv_rw_ok_lo_lo_hi, _entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_46 = {_entries_barrier_5_io_y_u, _entries_barrier_4_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] priv_rw_ok_lo_hi_hi; // @[package.scala:45:27] assign priv_rw_ok_lo_hi_hi = _GEN_46; // @[package.scala:45:27] wire [1:0] priv_rw_ok_lo_hi_hi_1; // @[package.scala:45:27] assign priv_rw_ok_lo_hi_hi_1 = _GEN_46; // @[package.scala:45:27] wire [1:0] priv_x_ok_lo_hi_hi; // @[package.scala:45:27] assign priv_x_ok_lo_hi_hi = _GEN_46; // @[package.scala:45:27] wire [1:0] priv_x_ok_lo_hi_hi_1; // @[package.scala:45:27] assign priv_x_ok_lo_hi_hi_1 = _GEN_46; // @[package.scala:45:27] wire [2:0] priv_rw_ok_lo_hi = {priv_rw_ok_lo_hi_hi, _entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25] wire [5:0] priv_rw_ok_lo = {priv_rw_ok_lo_hi, priv_rw_ok_lo_lo}; // @[package.scala:45:27] wire [1:0] _GEN_47 = {_entries_barrier_8_io_y_u, _entries_barrier_7_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] priv_rw_ok_hi_lo_hi; // @[package.scala:45:27] assign priv_rw_ok_hi_lo_hi = _GEN_47; // @[package.scala:45:27] wire [1:0] priv_rw_ok_hi_lo_hi_1; // @[package.scala:45:27] assign priv_rw_ok_hi_lo_hi_1 = _GEN_47; // @[package.scala:45:27] wire [1:0] priv_x_ok_hi_lo_hi; // @[package.scala:45:27] assign priv_x_ok_hi_lo_hi = _GEN_47; // @[package.scala:45:27] wire [1:0] priv_x_ok_hi_lo_hi_1; // @[package.scala:45:27] assign priv_x_ok_hi_lo_hi_1 = _GEN_47; // @[package.scala:45:27] wire [2:0] priv_rw_ok_hi_lo = {priv_rw_ok_hi_lo_hi, _entries_barrier_6_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_48 = {_entries_barrier_10_io_y_u, 
_entries_barrier_9_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] priv_rw_ok_hi_hi_lo; // @[package.scala:45:27] assign priv_rw_ok_hi_hi_lo = _GEN_48; // @[package.scala:45:27] wire [1:0] priv_rw_ok_hi_hi_lo_1; // @[package.scala:45:27] assign priv_rw_ok_hi_hi_lo_1 = _GEN_48; // @[package.scala:45:27] wire [1:0] priv_x_ok_hi_hi_lo; // @[package.scala:45:27] assign priv_x_ok_hi_hi_lo = _GEN_48; // @[package.scala:45:27] wire [1:0] priv_x_ok_hi_hi_lo_1; // @[package.scala:45:27] assign priv_x_ok_hi_hi_lo_1 = _GEN_48; // @[package.scala:45:27] wire [1:0] _GEN_49 = {_entries_barrier_12_io_y_u, _entries_barrier_11_io_y_u}; // @[package.scala:45:27, :267:25] wire [1:0] priv_rw_ok_hi_hi_hi; // @[package.scala:45:27] assign priv_rw_ok_hi_hi_hi = _GEN_49; // @[package.scala:45:27] wire [1:0] priv_rw_ok_hi_hi_hi_1; // @[package.scala:45:27] assign priv_rw_ok_hi_hi_hi_1 = _GEN_49; // @[package.scala:45:27] wire [1:0] priv_x_ok_hi_hi_hi; // @[package.scala:45:27] assign priv_x_ok_hi_hi_hi = _GEN_49; // @[package.scala:45:27] wire [1:0] priv_x_ok_hi_hi_hi_1; // @[package.scala:45:27] assign priv_x_ok_hi_hi_hi_1 = _GEN_49; // @[package.scala:45:27] wire [3:0] priv_rw_ok_hi_hi = {priv_rw_ok_hi_hi_hi, priv_rw_ok_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] priv_rw_ok_hi = {priv_rw_ok_hi_hi, priv_rw_ok_hi_lo}; // @[package.scala:45:27] wire [12:0] _priv_rw_ok_T_2 = {priv_rw_ok_hi, priv_rw_ok_lo}; // @[package.scala:45:27] wire [12:0] _priv_rw_ok_T_3 = _priv_rw_ok_T_1 ? _priv_rw_ok_T_2 : 13'h0; // @[package.scala:45:27] wire [2:0] priv_rw_ok_lo_lo_1 = {priv_rw_ok_lo_lo_hi_1, _entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25] wire [2:0] priv_rw_ok_lo_hi_1 = {priv_rw_ok_lo_hi_hi_1, _entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25] wire [5:0] priv_rw_ok_lo_1 = {priv_rw_ok_lo_hi_1, priv_rw_ok_lo_lo_1}; // @[package.scala:45:27] wire [2:0] priv_rw_ok_hi_lo_1 = {priv_rw_ok_hi_lo_hi_1, _entries_barrier_6_io_y_u}; // @[package.scala:45:27, :267:25] wire [3:0] priv_rw_ok_hi_hi_1 = {priv_rw_ok_hi_hi_hi_1, priv_rw_ok_hi_hi_lo_1}; // @[package.scala:45:27] wire [6:0] priv_rw_ok_hi_1 = {priv_rw_ok_hi_hi_1, priv_rw_ok_hi_lo_1}; // @[package.scala:45:27] wire [12:0] _priv_rw_ok_T_4 = {priv_rw_ok_hi_1, priv_rw_ok_lo_1}; // @[package.scala:45:27] wire [12:0] _priv_rw_ok_T_5 = ~_priv_rw_ok_T_4; // @[package.scala:45:27] wire [12:0] _priv_rw_ok_T_6 = priv_s ? 
_priv_rw_ok_T_5 : 13'h0; // @[TLB.scala:370:20, :513:{75,84}] wire [12:0] priv_rw_ok = _priv_rw_ok_T_3 | _priv_rw_ok_T_6; // @[TLB.scala:513:{23,70,75}] wire [2:0] priv_x_ok_lo_lo = {priv_x_ok_lo_lo_hi, _entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25] wire [2:0] priv_x_ok_lo_hi = {priv_x_ok_lo_hi_hi, _entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25] wire [5:0] priv_x_ok_lo = {priv_x_ok_lo_hi, priv_x_ok_lo_lo}; // @[package.scala:45:27] wire [2:0] priv_x_ok_hi_lo = {priv_x_ok_hi_lo_hi, _entries_barrier_6_io_y_u}; // @[package.scala:45:27, :267:25] wire [3:0] priv_x_ok_hi_hi = {priv_x_ok_hi_hi_hi, priv_x_ok_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] priv_x_ok_hi = {priv_x_ok_hi_hi, priv_x_ok_hi_lo}; // @[package.scala:45:27] wire [12:0] _priv_x_ok_T = {priv_x_ok_hi, priv_x_ok_lo}; // @[package.scala:45:27] wire [12:0] _priv_x_ok_T_1 = ~_priv_x_ok_T; // @[package.scala:45:27] wire [2:0] priv_x_ok_lo_lo_1 = {priv_x_ok_lo_lo_hi_1, _entries_barrier_io_y_u}; // @[package.scala:45:27, :267:25] wire [2:0] priv_x_ok_lo_hi_1 = {priv_x_ok_lo_hi_hi_1, _entries_barrier_3_io_y_u}; // @[package.scala:45:27, :267:25] wire [5:0] priv_x_ok_lo_1 = {priv_x_ok_lo_hi_1, priv_x_ok_lo_lo_1}; // @[package.scala:45:27] wire [2:0] priv_x_ok_hi_lo_1 = {priv_x_ok_hi_lo_hi_1, _entries_barrier_6_io_y_u}; // @[package.scala:45:27, :267:25] wire [3:0] priv_x_ok_hi_hi_1 = {priv_x_ok_hi_hi_hi_1, priv_x_ok_hi_hi_lo_1}; // @[package.scala:45:27] wire [6:0] priv_x_ok_hi_1 = {priv_x_ok_hi_hi_1, priv_x_ok_hi_lo_1}; // @[package.scala:45:27] wire [12:0] _priv_x_ok_T_2 = {priv_x_ok_hi_1, priv_x_ok_lo_1}; // @[package.scala:45:27] wire [12:0] priv_x_ok = priv_s ? _priv_x_ok_T_1 : _priv_x_ok_T_2; // @[package.scala:45:27] wire _stage1_bypass_T_1 = ~stage1_en; // @[TLB.scala:374:29, :517:83] wire [12:0] _stage1_bypass_T_2 = {13{_stage1_bypass_T_1}}; // @[TLB.scala:517:{68,83}] wire [1:0] stage1_bypass_lo_lo_hi = {_entries_barrier_2_io_y_ae_stage2, _entries_barrier_1_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [2:0] stage1_bypass_lo_lo = {stage1_bypass_lo_lo_hi, _entries_barrier_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [1:0] stage1_bypass_lo_hi_hi = {_entries_barrier_5_io_y_ae_stage2, _entries_barrier_4_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [2:0] stage1_bypass_lo_hi = {stage1_bypass_lo_hi_hi, _entries_barrier_3_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [5:0] stage1_bypass_lo = {stage1_bypass_lo_hi, stage1_bypass_lo_lo}; // @[package.scala:45:27] wire [1:0] stage1_bypass_hi_lo_hi = {_entries_barrier_8_io_y_ae_stage2, _entries_barrier_7_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [2:0] stage1_bypass_hi_lo = {stage1_bypass_hi_lo_hi, _entries_barrier_6_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [1:0] stage1_bypass_hi_hi_lo = {_entries_barrier_10_io_y_ae_stage2, _entries_barrier_9_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [1:0] stage1_bypass_hi_hi_hi = {_entries_barrier_12_io_y_ae_stage2, _entries_barrier_11_io_y_ae_stage2}; // @[package.scala:45:27, :267:25] wire [3:0] stage1_bypass_hi_hi = {stage1_bypass_hi_hi_hi, stage1_bypass_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] stage1_bypass_hi = {stage1_bypass_hi_hi, stage1_bypass_hi_lo}; // @[package.scala:45:27] wire [12:0] _stage1_bypass_T_3 = {stage1_bypass_hi, stage1_bypass_lo}; // @[package.scala:45:27] wire [12:0] _stage1_bypass_T_4 = _stage1_bypass_T_2 | _stage1_bypass_T_3; // @[package.scala:45:27] wire [1:0] r_array_lo_lo_hi = 
{_entries_barrier_2_io_y_sr, _entries_barrier_1_io_y_sr}; // @[package.scala:45:27, :267:25] wire [2:0] r_array_lo_lo = {r_array_lo_lo_hi, _entries_barrier_io_y_sr}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_lo_hi_hi = {_entries_barrier_5_io_y_sr, _entries_barrier_4_io_y_sr}; // @[package.scala:45:27, :267:25] wire [2:0] r_array_lo_hi = {r_array_lo_hi_hi, _entries_barrier_3_io_y_sr}; // @[package.scala:45:27, :267:25] wire [5:0] r_array_lo = {r_array_lo_hi, r_array_lo_lo}; // @[package.scala:45:27] wire [1:0] r_array_hi_lo_hi = {_entries_barrier_8_io_y_sr, _entries_barrier_7_io_y_sr}; // @[package.scala:45:27, :267:25] wire [2:0] r_array_hi_lo = {r_array_hi_lo_hi, _entries_barrier_6_io_y_sr}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_hi_hi_lo = {_entries_barrier_10_io_y_sr, _entries_barrier_9_io_y_sr}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_hi_hi_hi = {_entries_barrier_12_io_y_sr, _entries_barrier_11_io_y_sr}; // @[package.scala:45:27, :267:25] wire [3:0] r_array_hi_hi = {r_array_hi_hi_hi, r_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] r_array_hi = {r_array_hi_hi, r_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _r_array_T = {r_array_hi, r_array_lo}; // @[package.scala:45:27] wire [1:0] _GEN_50 = {_entries_barrier_2_io_y_sx, _entries_barrier_1_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_lo_lo_hi_1; // @[package.scala:45:27] assign r_array_lo_lo_hi_1 = _GEN_50; // @[package.scala:45:27] wire [1:0] x_array_lo_lo_hi; // @[package.scala:45:27] assign x_array_lo_lo_hi = _GEN_50; // @[package.scala:45:27] wire [2:0] r_array_lo_lo_1 = {r_array_lo_lo_hi_1, _entries_barrier_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_51 = {_entries_barrier_5_io_y_sx, _entries_barrier_4_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_lo_hi_hi_1; // @[package.scala:45:27] assign r_array_lo_hi_hi_1 = _GEN_51; // @[package.scala:45:27] wire [1:0] x_array_lo_hi_hi; // @[package.scala:45:27] assign x_array_lo_hi_hi = _GEN_51; // @[package.scala:45:27] wire [2:0] r_array_lo_hi_1 = {r_array_lo_hi_hi_1, _entries_barrier_3_io_y_sx}; // @[package.scala:45:27, :267:25] wire [5:0] r_array_lo_1 = {r_array_lo_hi_1, r_array_lo_lo_1}; // @[package.scala:45:27] wire [1:0] _GEN_52 = {_entries_barrier_8_io_y_sx, _entries_barrier_7_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_hi_lo_hi_1; // @[package.scala:45:27] assign r_array_hi_lo_hi_1 = _GEN_52; // @[package.scala:45:27] wire [1:0] x_array_hi_lo_hi; // @[package.scala:45:27] assign x_array_hi_lo_hi = _GEN_52; // @[package.scala:45:27] wire [2:0] r_array_hi_lo_1 = {r_array_hi_lo_hi_1, _entries_barrier_6_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_53 = {_entries_barrier_10_io_y_sx, _entries_barrier_9_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_hi_hi_lo_1; // @[package.scala:45:27] assign r_array_hi_hi_lo_1 = _GEN_53; // @[package.scala:45:27] wire [1:0] x_array_hi_hi_lo; // @[package.scala:45:27] assign x_array_hi_hi_lo = _GEN_53; // @[package.scala:45:27] wire [1:0] _GEN_54 = {_entries_barrier_12_io_y_sx, _entries_barrier_11_io_y_sx}; // @[package.scala:45:27, :267:25] wire [1:0] r_array_hi_hi_hi_1; // @[package.scala:45:27] assign r_array_hi_hi_hi_1 = _GEN_54; // @[package.scala:45:27] wire [1:0] x_array_hi_hi_hi; // @[package.scala:45:27] assign x_array_hi_hi_hi = _GEN_54; // @[package.scala:45:27] wire [3:0] r_array_hi_hi_1 = {r_array_hi_hi_hi_1, r_array_hi_hi_lo_1}; // @[package.scala:45:27] wire [6:0] r_array_hi_1 = 
{r_array_hi_hi_1, r_array_hi_lo_1}; // @[package.scala:45:27] wire [12:0] _r_array_T_1 = {r_array_hi_1, r_array_lo_1}; // @[package.scala:45:27] wire [12:0] _r_array_T_2 = mxr ? _r_array_T_1 : 13'h0; // @[package.scala:45:27] wire [12:0] _r_array_T_3 = _r_array_T | _r_array_T_2; // @[package.scala:45:27] wire [12:0] _r_array_T_4 = priv_rw_ok & _r_array_T_3; // @[TLB.scala:513:70, :520:{41,69}] wire [12:0] _r_array_T_5 = _r_array_T_4; // @[TLB.scala:520:{41,113}] wire [13:0] r_array = {1'h1, _r_array_T_5}; // @[TLB.scala:520:{20,113}] wire [13:0] _pf_ld_array_T = r_array; // @[TLB.scala:520:20, :597:41] wire [1:0] w_array_lo_lo_hi = {_entries_barrier_2_io_y_sw, _entries_barrier_1_io_y_sw}; // @[package.scala:45:27, :267:25] wire [2:0] w_array_lo_lo = {w_array_lo_lo_hi, _entries_barrier_io_y_sw}; // @[package.scala:45:27, :267:25] wire [1:0] w_array_lo_hi_hi = {_entries_barrier_5_io_y_sw, _entries_barrier_4_io_y_sw}; // @[package.scala:45:27, :267:25] wire [2:0] w_array_lo_hi = {w_array_lo_hi_hi, _entries_barrier_3_io_y_sw}; // @[package.scala:45:27, :267:25] wire [5:0] w_array_lo = {w_array_lo_hi, w_array_lo_lo}; // @[package.scala:45:27] wire [1:0] w_array_hi_lo_hi = {_entries_barrier_8_io_y_sw, _entries_barrier_7_io_y_sw}; // @[package.scala:45:27, :267:25] wire [2:0] w_array_hi_lo = {w_array_hi_lo_hi, _entries_barrier_6_io_y_sw}; // @[package.scala:45:27, :267:25] wire [1:0] w_array_hi_hi_lo = {_entries_barrier_10_io_y_sw, _entries_barrier_9_io_y_sw}; // @[package.scala:45:27, :267:25] wire [1:0] w_array_hi_hi_hi = {_entries_barrier_12_io_y_sw, _entries_barrier_11_io_y_sw}; // @[package.scala:45:27, :267:25] wire [3:0] w_array_hi_hi = {w_array_hi_hi_hi, w_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] w_array_hi = {w_array_hi_hi, w_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _w_array_T = {w_array_hi, w_array_lo}; // @[package.scala:45:27] wire [12:0] _w_array_T_1 = priv_rw_ok & _w_array_T; // @[package.scala:45:27] wire [12:0] _w_array_T_2 = _w_array_T_1; // @[TLB.scala:521:{41,69}] wire [13:0] w_array = {1'h1, _w_array_T_2}; // @[TLB.scala:521:{20,69}] wire [2:0] x_array_lo_lo = {x_array_lo_lo_hi, _entries_barrier_io_y_sx}; // @[package.scala:45:27, :267:25] wire [2:0] x_array_lo_hi = {x_array_lo_hi_hi, _entries_barrier_3_io_y_sx}; // @[package.scala:45:27, :267:25] wire [5:0] x_array_lo = {x_array_lo_hi, x_array_lo_lo}; // @[package.scala:45:27] wire [2:0] x_array_hi_lo = {x_array_hi_lo_hi, _entries_barrier_6_io_y_sx}; // @[package.scala:45:27, :267:25] wire [3:0] x_array_hi_hi = {x_array_hi_hi_hi, x_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] x_array_hi = {x_array_hi_hi, x_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _x_array_T = {x_array_hi, x_array_lo}; // @[package.scala:45:27] wire [12:0] _x_array_T_1 = priv_x_ok & _x_array_T; // @[package.scala:45:27] wire [12:0] _x_array_T_2 = _x_array_T_1; // @[TLB.scala:522:{40,68}] wire [13:0] x_array = {1'h1, _x_array_T_2}; // @[TLB.scala:522:{20,68}] wire [1:0] hr_array_lo_lo_hi = {_entries_barrier_2_io_y_hr, _entries_barrier_1_io_y_hr}; // @[package.scala:45:27, :267:25] wire [2:0] hr_array_lo_lo = {hr_array_lo_lo_hi, _entries_barrier_io_y_hr}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_lo_hi_hi = {_entries_barrier_5_io_y_hr, _entries_barrier_4_io_y_hr}; // @[package.scala:45:27, :267:25] wire [2:0] hr_array_lo_hi = {hr_array_lo_hi_hi, _entries_barrier_3_io_y_hr}; // @[package.scala:45:27, :267:25] wire [5:0] hr_array_lo = {hr_array_lo_hi, hr_array_lo_lo}; // @[package.scala:45:27] wire [1:0] 
hr_array_hi_lo_hi = {_entries_barrier_8_io_y_hr, _entries_barrier_7_io_y_hr}; // @[package.scala:45:27, :267:25] wire [2:0] hr_array_hi_lo = {hr_array_hi_lo_hi, _entries_barrier_6_io_y_hr}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_hi_hi_lo = {_entries_barrier_10_io_y_hr, _entries_barrier_9_io_y_hr}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_hi_hi_hi = {_entries_barrier_12_io_y_hr, _entries_barrier_11_io_y_hr}; // @[package.scala:45:27, :267:25] wire [3:0] hr_array_hi_hi = {hr_array_hi_hi_hi, hr_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] hr_array_hi = {hr_array_hi_hi, hr_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _hr_array_T = {hr_array_hi, hr_array_lo}; // @[package.scala:45:27] wire [1:0] _GEN_55 = {_entries_barrier_2_io_y_hx, _entries_barrier_1_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_lo_lo_hi_1; // @[package.scala:45:27] assign hr_array_lo_lo_hi_1 = _GEN_55; // @[package.scala:45:27] wire [1:0] hx_array_lo_lo_hi; // @[package.scala:45:27] assign hx_array_lo_lo_hi = _GEN_55; // @[package.scala:45:27] wire [2:0] hr_array_lo_lo_1 = {hr_array_lo_lo_hi_1, _entries_barrier_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_56 = {_entries_barrier_5_io_y_hx, _entries_barrier_4_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_lo_hi_hi_1; // @[package.scala:45:27] assign hr_array_lo_hi_hi_1 = _GEN_56; // @[package.scala:45:27] wire [1:0] hx_array_lo_hi_hi; // @[package.scala:45:27] assign hx_array_lo_hi_hi = _GEN_56; // @[package.scala:45:27] wire [2:0] hr_array_lo_hi_1 = {hr_array_lo_hi_hi_1, _entries_barrier_3_io_y_hx}; // @[package.scala:45:27, :267:25] wire [5:0] hr_array_lo_1 = {hr_array_lo_hi_1, hr_array_lo_lo_1}; // @[package.scala:45:27] wire [1:0] _GEN_57 = {_entries_barrier_8_io_y_hx, _entries_barrier_7_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_hi_lo_hi_1; // @[package.scala:45:27] assign hr_array_hi_lo_hi_1 = _GEN_57; // @[package.scala:45:27] wire [1:0] hx_array_hi_lo_hi; // @[package.scala:45:27] assign hx_array_hi_lo_hi = _GEN_57; // @[package.scala:45:27] wire [2:0] hr_array_hi_lo_1 = {hr_array_hi_lo_hi_1, _entries_barrier_6_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_58 = {_entries_barrier_10_io_y_hx, _entries_barrier_9_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_hi_hi_lo_1; // @[package.scala:45:27] assign hr_array_hi_hi_lo_1 = _GEN_58; // @[package.scala:45:27] wire [1:0] hx_array_hi_hi_lo; // @[package.scala:45:27] assign hx_array_hi_hi_lo = _GEN_58; // @[package.scala:45:27] wire [1:0] _GEN_59 = {_entries_barrier_12_io_y_hx, _entries_barrier_11_io_y_hx}; // @[package.scala:45:27, :267:25] wire [1:0] hr_array_hi_hi_hi_1; // @[package.scala:45:27] assign hr_array_hi_hi_hi_1 = _GEN_59; // @[package.scala:45:27] wire [1:0] hx_array_hi_hi_hi; // @[package.scala:45:27] assign hx_array_hi_hi_hi = _GEN_59; // @[package.scala:45:27] wire [3:0] hr_array_hi_hi_1 = {hr_array_hi_hi_hi_1, hr_array_hi_hi_lo_1}; // @[package.scala:45:27] wire [6:0] hr_array_hi_1 = {hr_array_hi_hi_1, hr_array_hi_lo_1}; // @[package.scala:45:27] wire [12:0] _hr_array_T_1 = {hr_array_hi_1, hr_array_lo_1}; // @[package.scala:45:27] wire [12:0] _hr_array_T_2 = io_ptw_status_mxr_0 ? 
_hr_array_T_1 : 13'h0; // @[package.scala:45:27] wire [12:0] _hr_array_T_3 = _hr_array_T | _hr_array_T_2; // @[package.scala:45:27] wire [1:0] hw_array_lo_lo_hi = {_entries_barrier_2_io_y_hw, _entries_barrier_1_io_y_hw}; // @[package.scala:45:27, :267:25] wire [2:0] hw_array_lo_lo = {hw_array_lo_lo_hi, _entries_barrier_io_y_hw}; // @[package.scala:45:27, :267:25] wire [1:0] hw_array_lo_hi_hi = {_entries_barrier_5_io_y_hw, _entries_barrier_4_io_y_hw}; // @[package.scala:45:27, :267:25] wire [2:0] hw_array_lo_hi = {hw_array_lo_hi_hi, _entries_barrier_3_io_y_hw}; // @[package.scala:45:27, :267:25] wire [5:0] hw_array_lo = {hw_array_lo_hi, hw_array_lo_lo}; // @[package.scala:45:27] wire [1:0] hw_array_hi_lo_hi = {_entries_barrier_8_io_y_hw, _entries_barrier_7_io_y_hw}; // @[package.scala:45:27, :267:25] wire [2:0] hw_array_hi_lo = {hw_array_hi_lo_hi, _entries_barrier_6_io_y_hw}; // @[package.scala:45:27, :267:25] wire [1:0] hw_array_hi_hi_lo = {_entries_barrier_10_io_y_hw, _entries_barrier_9_io_y_hw}; // @[package.scala:45:27, :267:25] wire [1:0] hw_array_hi_hi_hi = {_entries_barrier_12_io_y_hw, _entries_barrier_11_io_y_hw}; // @[package.scala:45:27, :267:25] wire [3:0] hw_array_hi_hi = {hw_array_hi_hi_hi, hw_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] hw_array_hi = {hw_array_hi_hi, hw_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _hw_array_T = {hw_array_hi, hw_array_lo}; // @[package.scala:45:27] wire [2:0] hx_array_lo_lo = {hx_array_lo_lo_hi, _entries_barrier_io_y_hx}; // @[package.scala:45:27, :267:25] wire [2:0] hx_array_lo_hi = {hx_array_lo_hi_hi, _entries_barrier_3_io_y_hx}; // @[package.scala:45:27, :267:25] wire [5:0] hx_array_lo = {hx_array_lo_hi, hx_array_lo_lo}; // @[package.scala:45:27] wire [2:0] hx_array_hi_lo = {hx_array_hi_lo_hi, _entries_barrier_6_io_y_hx}; // @[package.scala:45:27, :267:25] wire [3:0] hx_array_hi_hi = {hx_array_hi_hi_hi, hx_array_hi_hi_lo}; // @[package.scala:45:27] wire [6:0] hx_array_hi = {hx_array_hi_hi, hx_array_hi_lo}; // @[package.scala:45:27] wire [12:0] _hx_array_T = {hx_array_hi, hx_array_lo}; // @[package.scala:45:27] wire [1:0] _pr_array_T = {2{prot_r}}; // @[TLB.scala:429:55, :529:26] wire [1:0] pr_array_lo_lo_hi = {_entries_barrier_2_io_y_pr, _entries_barrier_1_io_y_pr}; // @[package.scala:45:27, :267:25] wire [2:0] pr_array_lo_lo = {pr_array_lo_lo_hi, _entries_barrier_io_y_pr}; // @[package.scala:45:27, :267:25] wire [1:0] pr_array_lo_hi_hi = {_entries_barrier_5_io_y_pr, _entries_barrier_4_io_y_pr}; // @[package.scala:45:27, :267:25] wire [2:0] pr_array_lo_hi = {pr_array_lo_hi_hi, _entries_barrier_3_io_y_pr}; // @[package.scala:45:27, :267:25] wire [5:0] pr_array_lo = {pr_array_lo_hi, pr_array_lo_lo}; // @[package.scala:45:27] wire [1:0] pr_array_hi_lo_hi = {_entries_barrier_8_io_y_pr, _entries_barrier_7_io_y_pr}; // @[package.scala:45:27, :267:25] wire [2:0] pr_array_hi_lo = {pr_array_hi_lo_hi, _entries_barrier_6_io_y_pr}; // @[package.scala:45:27, :267:25] wire [1:0] pr_array_hi_hi_hi = {_entries_barrier_11_io_y_pr, _entries_barrier_10_io_y_pr}; // @[package.scala:45:27, :267:25] wire [2:0] pr_array_hi_hi = {pr_array_hi_hi_hi, _entries_barrier_9_io_y_pr}; // @[package.scala:45:27, :267:25] wire [5:0] pr_array_hi = {pr_array_hi_hi, pr_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _pr_array_T_1 = {pr_array_hi, pr_array_lo}; // @[package.scala:45:27] wire [13:0] _pr_array_T_2 = {_pr_array_T, _pr_array_T_1}; // @[package.scala:45:27] wire [13:0] _GEN_60 = ptw_ae_array | final_ae_array; // @[TLB.scala:506:25, :507:27, 
:529:104] wire [13:0] _pr_array_T_3; // @[TLB.scala:529:104] assign _pr_array_T_3 = _GEN_60; // @[TLB.scala:529:104] wire [13:0] _pw_array_T_3; // @[TLB.scala:531:104] assign _pw_array_T_3 = _GEN_60; // @[TLB.scala:529:104, :531:104] wire [13:0] _px_array_T_3; // @[TLB.scala:533:104] assign _px_array_T_3 = _GEN_60; // @[TLB.scala:529:104, :533:104] wire [13:0] _pr_array_T_4 = ~_pr_array_T_3; // @[TLB.scala:529:{89,104}] wire [13:0] pr_array = _pr_array_T_2 & _pr_array_T_4; // @[TLB.scala:529:{21,87,89}] wire [1:0] _pw_array_T = {2{prot_w}}; // @[TLB.scala:430:55, :531:26] wire [1:0] pw_array_lo_lo_hi = {_entries_barrier_2_io_y_pw, _entries_barrier_1_io_y_pw}; // @[package.scala:45:27, :267:25] wire [2:0] pw_array_lo_lo = {pw_array_lo_lo_hi, _entries_barrier_io_y_pw}; // @[package.scala:45:27, :267:25] wire [1:0] pw_array_lo_hi_hi = {_entries_barrier_5_io_y_pw, _entries_barrier_4_io_y_pw}; // @[package.scala:45:27, :267:25] wire [2:0] pw_array_lo_hi = {pw_array_lo_hi_hi, _entries_barrier_3_io_y_pw}; // @[package.scala:45:27, :267:25] wire [5:0] pw_array_lo = {pw_array_lo_hi, pw_array_lo_lo}; // @[package.scala:45:27] wire [1:0] pw_array_hi_lo_hi = {_entries_barrier_8_io_y_pw, _entries_barrier_7_io_y_pw}; // @[package.scala:45:27, :267:25] wire [2:0] pw_array_hi_lo = {pw_array_hi_lo_hi, _entries_barrier_6_io_y_pw}; // @[package.scala:45:27, :267:25] wire [1:0] pw_array_hi_hi_hi = {_entries_barrier_11_io_y_pw, _entries_barrier_10_io_y_pw}; // @[package.scala:45:27, :267:25] wire [2:0] pw_array_hi_hi = {pw_array_hi_hi_hi, _entries_barrier_9_io_y_pw}; // @[package.scala:45:27, :267:25] wire [5:0] pw_array_hi = {pw_array_hi_hi, pw_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _pw_array_T_1 = {pw_array_hi, pw_array_lo}; // @[package.scala:45:27] wire [13:0] _pw_array_T_2 = {_pw_array_T, _pw_array_T_1}; // @[package.scala:45:27] wire [13:0] _pw_array_T_4 = ~_pw_array_T_3; // @[TLB.scala:531:{89,104}] wire [13:0] pw_array = _pw_array_T_2 & _pw_array_T_4; // @[TLB.scala:531:{21,87,89}] wire [1:0] _px_array_T = {2{prot_x}}; // @[TLB.scala:434:55, :533:26] wire [1:0] px_array_lo_lo_hi = {_entries_barrier_2_io_y_px, _entries_barrier_1_io_y_px}; // @[package.scala:45:27, :267:25] wire [2:0] px_array_lo_lo = {px_array_lo_lo_hi, _entries_barrier_io_y_px}; // @[package.scala:45:27, :267:25] wire [1:0] px_array_lo_hi_hi = {_entries_barrier_5_io_y_px, _entries_barrier_4_io_y_px}; // @[package.scala:45:27, :267:25] wire [2:0] px_array_lo_hi = {px_array_lo_hi_hi, _entries_barrier_3_io_y_px}; // @[package.scala:45:27, :267:25] wire [5:0] px_array_lo = {px_array_lo_hi, px_array_lo_lo}; // @[package.scala:45:27] wire [1:0] px_array_hi_lo_hi = {_entries_barrier_8_io_y_px, _entries_barrier_7_io_y_px}; // @[package.scala:45:27, :267:25] wire [2:0] px_array_hi_lo = {px_array_hi_lo_hi, _entries_barrier_6_io_y_px}; // @[package.scala:45:27, :267:25] wire [1:0] px_array_hi_hi_hi = {_entries_barrier_11_io_y_px, _entries_barrier_10_io_y_px}; // @[package.scala:45:27, :267:25] wire [2:0] px_array_hi_hi = {px_array_hi_hi_hi, _entries_barrier_9_io_y_px}; // @[package.scala:45:27, :267:25] wire [5:0] px_array_hi = {px_array_hi_hi, px_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _px_array_T_1 = {px_array_hi, px_array_lo}; // @[package.scala:45:27] wire [13:0] _px_array_T_2 = {_px_array_T, _px_array_T_1}; // @[package.scala:45:27] wire [13:0] _px_array_T_4 = ~_px_array_T_3; // @[TLB.scala:533:{89,104}] wire [13:0] px_array = _px_array_T_2 & _px_array_T_4; // @[TLB.scala:533:{21,87,89}] wire [1:0] _eff_array_T = 
{2{_pma_io_resp_eff}}; // @[TLB.scala:422:19, :535:27] wire [1:0] eff_array_lo_lo_hi = {_entries_barrier_2_io_y_eff, _entries_barrier_1_io_y_eff}; // @[package.scala:45:27, :267:25] wire [2:0] eff_array_lo_lo = {eff_array_lo_lo_hi, _entries_barrier_io_y_eff}; // @[package.scala:45:27, :267:25] wire [1:0] eff_array_lo_hi_hi = {_entries_barrier_5_io_y_eff, _entries_barrier_4_io_y_eff}; // @[package.scala:45:27, :267:25] wire [2:0] eff_array_lo_hi = {eff_array_lo_hi_hi, _entries_barrier_3_io_y_eff}; // @[package.scala:45:27, :267:25] wire [5:0] eff_array_lo = {eff_array_lo_hi, eff_array_lo_lo}; // @[package.scala:45:27] wire [1:0] eff_array_hi_lo_hi = {_entries_barrier_8_io_y_eff, _entries_barrier_7_io_y_eff}; // @[package.scala:45:27, :267:25] wire [2:0] eff_array_hi_lo = {eff_array_hi_lo_hi, _entries_barrier_6_io_y_eff}; // @[package.scala:45:27, :267:25] wire [1:0] eff_array_hi_hi_hi = {_entries_barrier_11_io_y_eff, _entries_barrier_10_io_y_eff}; // @[package.scala:45:27, :267:25] wire [2:0] eff_array_hi_hi = {eff_array_hi_hi_hi, _entries_barrier_9_io_y_eff}; // @[package.scala:45:27, :267:25] wire [5:0] eff_array_hi = {eff_array_hi_hi, eff_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _eff_array_T_1 = {eff_array_hi, eff_array_lo}; // @[package.scala:45:27] wire [13:0] eff_array = {_eff_array_T, _eff_array_T_1}; // @[package.scala:45:27] wire [1:0] _c_array_T = {2{cacheable}}; // @[TLB.scala:425:41, :537:25] wire [1:0] _GEN_61 = {_entries_barrier_2_io_y_c, _entries_barrier_1_io_y_c}; // @[package.scala:45:27, :267:25] wire [1:0] c_array_lo_lo_hi; // @[package.scala:45:27] assign c_array_lo_lo_hi = _GEN_61; // @[package.scala:45:27] wire [1:0] prefetchable_array_lo_lo_hi; // @[package.scala:45:27] assign prefetchable_array_lo_lo_hi = _GEN_61; // @[package.scala:45:27] wire [2:0] c_array_lo_lo = {c_array_lo_lo_hi, _entries_barrier_io_y_c}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_62 = {_entries_barrier_5_io_y_c, _entries_barrier_4_io_y_c}; // @[package.scala:45:27, :267:25] wire [1:0] c_array_lo_hi_hi; // @[package.scala:45:27] assign c_array_lo_hi_hi = _GEN_62; // @[package.scala:45:27] wire [1:0] prefetchable_array_lo_hi_hi; // @[package.scala:45:27] assign prefetchable_array_lo_hi_hi = _GEN_62; // @[package.scala:45:27] wire [2:0] c_array_lo_hi = {c_array_lo_hi_hi, _entries_barrier_3_io_y_c}; // @[package.scala:45:27, :267:25] wire [5:0] c_array_lo = {c_array_lo_hi, c_array_lo_lo}; // @[package.scala:45:27] wire [1:0] _GEN_63 = {_entries_barrier_8_io_y_c, _entries_barrier_7_io_y_c}; // @[package.scala:45:27, :267:25] wire [1:0] c_array_hi_lo_hi; // @[package.scala:45:27] assign c_array_hi_lo_hi = _GEN_63; // @[package.scala:45:27] wire [1:0] prefetchable_array_hi_lo_hi; // @[package.scala:45:27] assign prefetchable_array_hi_lo_hi = _GEN_63; // @[package.scala:45:27] wire [2:0] c_array_hi_lo = {c_array_hi_lo_hi, _entries_barrier_6_io_y_c}; // @[package.scala:45:27, :267:25] wire [1:0] _GEN_64 = {_entries_barrier_11_io_y_c, _entries_barrier_10_io_y_c}; // @[package.scala:45:27, :267:25] wire [1:0] c_array_hi_hi_hi; // @[package.scala:45:27] assign c_array_hi_hi_hi = _GEN_64; // @[package.scala:45:27] wire [1:0] prefetchable_array_hi_hi_hi; // @[package.scala:45:27] assign prefetchable_array_hi_hi_hi = _GEN_64; // @[package.scala:45:27] wire [2:0] c_array_hi_hi = {c_array_hi_hi_hi, _entries_barrier_9_io_y_c}; // @[package.scala:45:27, :267:25] wire [5:0] c_array_hi = {c_array_hi_hi, c_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _c_array_T_1 = {c_array_hi, c_array_lo}; 
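// PMA attribute vectors: c_array (cacheable), ppp_array (put-partial), pal_array (AMO logical),
// paa_array (AMO arithmetic) and prefetchable_array combine each entry's cached attribute bits with
// the live pma checker response in the top two bit positions (which presumably cover the
// untranslated / pass-through cases); the *_if_cached variants OR in c_array so cacheable regions
// are treated as supporting these operations.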
// @[package.scala:45:27] wire [13:0] c_array = {_c_array_T, _c_array_T_1}; // @[package.scala:45:27] wire [13:0] lrscAllowed = c_array; // @[TLB.scala:537:20, :580:24] wire [1:0] _ppp_array_T = {2{_pma_io_resp_pp}}; // @[TLB.scala:422:19, :539:27] wire [1:0] ppp_array_lo_lo_hi = {_entries_barrier_2_io_y_ppp, _entries_barrier_1_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [2:0] ppp_array_lo_lo = {ppp_array_lo_lo_hi, _entries_barrier_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [1:0] ppp_array_lo_hi_hi = {_entries_barrier_5_io_y_ppp, _entries_barrier_4_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [2:0] ppp_array_lo_hi = {ppp_array_lo_hi_hi, _entries_barrier_3_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [5:0] ppp_array_lo = {ppp_array_lo_hi, ppp_array_lo_lo}; // @[package.scala:45:27] wire [1:0] ppp_array_hi_lo_hi = {_entries_barrier_8_io_y_ppp, _entries_barrier_7_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [2:0] ppp_array_hi_lo = {ppp_array_hi_lo_hi, _entries_barrier_6_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [1:0] ppp_array_hi_hi_hi = {_entries_barrier_11_io_y_ppp, _entries_barrier_10_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [2:0] ppp_array_hi_hi = {ppp_array_hi_hi_hi, _entries_barrier_9_io_y_ppp}; // @[package.scala:45:27, :267:25] wire [5:0] ppp_array_hi = {ppp_array_hi_hi, ppp_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _ppp_array_T_1 = {ppp_array_hi, ppp_array_lo}; // @[package.scala:45:27] wire [13:0] ppp_array = {_ppp_array_T, _ppp_array_T_1}; // @[package.scala:45:27] wire [1:0] _paa_array_T = {2{_pma_io_resp_aa}}; // @[TLB.scala:422:19, :541:27] wire [1:0] paa_array_lo_lo_hi = {_entries_barrier_2_io_y_paa, _entries_barrier_1_io_y_paa}; // @[package.scala:45:27, :267:25] wire [2:0] paa_array_lo_lo = {paa_array_lo_lo_hi, _entries_barrier_io_y_paa}; // @[package.scala:45:27, :267:25] wire [1:0] paa_array_lo_hi_hi = {_entries_barrier_5_io_y_paa, _entries_barrier_4_io_y_paa}; // @[package.scala:45:27, :267:25] wire [2:0] paa_array_lo_hi = {paa_array_lo_hi_hi, _entries_barrier_3_io_y_paa}; // @[package.scala:45:27, :267:25] wire [5:0] paa_array_lo = {paa_array_lo_hi, paa_array_lo_lo}; // @[package.scala:45:27] wire [1:0] paa_array_hi_lo_hi = {_entries_barrier_8_io_y_paa, _entries_barrier_7_io_y_paa}; // @[package.scala:45:27, :267:25] wire [2:0] paa_array_hi_lo = {paa_array_hi_lo_hi, _entries_barrier_6_io_y_paa}; // @[package.scala:45:27, :267:25] wire [1:0] paa_array_hi_hi_hi = {_entries_barrier_11_io_y_paa, _entries_barrier_10_io_y_paa}; // @[package.scala:45:27, :267:25] wire [2:0] paa_array_hi_hi = {paa_array_hi_hi_hi, _entries_barrier_9_io_y_paa}; // @[package.scala:45:27, :267:25] wire [5:0] paa_array_hi = {paa_array_hi_hi, paa_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _paa_array_T_1 = {paa_array_hi, paa_array_lo}; // @[package.scala:45:27] wire [13:0] paa_array = {_paa_array_T, _paa_array_T_1}; // @[package.scala:45:27] wire [1:0] _pal_array_T = {2{_pma_io_resp_al}}; // @[TLB.scala:422:19, :543:27] wire [1:0] pal_array_lo_lo_hi = {_entries_barrier_2_io_y_pal, _entries_barrier_1_io_y_pal}; // @[package.scala:45:27, :267:25] wire [2:0] pal_array_lo_lo = {pal_array_lo_lo_hi, _entries_barrier_io_y_pal}; // @[package.scala:45:27, :267:25] wire [1:0] pal_array_lo_hi_hi = {_entries_barrier_5_io_y_pal, _entries_barrier_4_io_y_pal}; // @[package.scala:45:27, :267:25] wire [2:0] pal_array_lo_hi = {pal_array_lo_hi_hi, _entries_barrier_3_io_y_pal}; // @[package.scala:45:27, :267:25] wire [5:0] pal_array_lo = 
{pal_array_lo_hi, pal_array_lo_lo}; // @[package.scala:45:27] wire [1:0] pal_array_hi_lo_hi = {_entries_barrier_8_io_y_pal, _entries_barrier_7_io_y_pal}; // @[package.scala:45:27, :267:25] wire [2:0] pal_array_hi_lo = {pal_array_hi_lo_hi, _entries_barrier_6_io_y_pal}; // @[package.scala:45:27, :267:25] wire [1:0] pal_array_hi_hi_hi = {_entries_barrier_11_io_y_pal, _entries_barrier_10_io_y_pal}; // @[package.scala:45:27, :267:25] wire [2:0] pal_array_hi_hi = {pal_array_hi_hi_hi, _entries_barrier_9_io_y_pal}; // @[package.scala:45:27, :267:25] wire [5:0] pal_array_hi = {pal_array_hi_hi, pal_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _pal_array_T_1 = {pal_array_hi, pal_array_lo}; // @[package.scala:45:27] wire [13:0] pal_array = {_pal_array_T, _pal_array_T_1}; // @[package.scala:45:27] wire [13:0] ppp_array_if_cached = ppp_array | c_array; // @[TLB.scala:537:20, :539:22, :544:39] wire [13:0] paa_array_if_cached = paa_array | c_array; // @[TLB.scala:537:20, :541:22, :545:39] wire [13:0] pal_array_if_cached = pal_array | c_array; // @[TLB.scala:537:20, :543:22, :546:39] wire _prefetchable_array_T = cacheable & homogeneous; // @[TLBPermissions.scala:101:65] wire [1:0] _prefetchable_array_T_1 = {_prefetchable_array_T, 1'h0}; // @[TLB.scala:547:{43,59}] wire [2:0] prefetchable_array_lo_lo = {prefetchable_array_lo_lo_hi, _entries_barrier_io_y_c}; // @[package.scala:45:27, :267:25] wire [2:0] prefetchable_array_lo_hi = {prefetchable_array_lo_hi_hi, _entries_barrier_3_io_y_c}; // @[package.scala:45:27, :267:25] wire [5:0] prefetchable_array_lo = {prefetchable_array_lo_hi, prefetchable_array_lo_lo}; // @[package.scala:45:27] wire [2:0] prefetchable_array_hi_lo = {prefetchable_array_hi_lo_hi, _entries_barrier_6_io_y_c}; // @[package.scala:45:27, :267:25] wire [2:0] prefetchable_array_hi_hi = {prefetchable_array_hi_hi_hi, _entries_barrier_9_io_y_c}; // @[package.scala:45:27, :267:25] wire [5:0] prefetchable_array_hi = {prefetchable_array_hi_hi, prefetchable_array_hi_lo}; // @[package.scala:45:27] wire [11:0] _prefetchable_array_T_2 = {prefetchable_array_hi, prefetchable_array_lo}; // @[package.scala:45:27] wire [13:0] prefetchable_array = {_prefetchable_array_T_1, _prefetchable_array_T_2}; // @[package.scala:45:27] wire [3:0] _misaligned_T = 4'h1 << io_req_bits_size_0; // @[OneHot.scala:58:35] wire [4:0] _misaligned_T_1 = {1'h0, _misaligned_T} - 5'h1; // @[OneHot.scala:58:35] wire [3:0] _misaligned_T_2 = _misaligned_T_1[3:0]; // @[TLB.scala:550:69] wire [48:0] _misaligned_T_3 = {45'h0, io_req_bits_vaddr_0[3:0] & _misaligned_T_2}; // @[TLB.scala:318:7, :550:{39,69}] wire misaligned = |_misaligned_T_3; // @[TLB.scala:550:{39,77}] wire _bad_va_T = vm_enabled & stage1_en; // @[TLB.scala:374:29, :399:61, :568:21] wire bad_va_additionalPgLevels = satp_mode[0]; // @[package.scala:163:13] wire _bad_va_T_8 = bad_va_additionalPgLevels; // @[package.scala:163:13] wire [48:0] bad_va_maskedVAddr = io_req_bits_vaddr_0 & 49'h1FFC000000000; // @[TLB.scala:318:7, :559:43] wire _bad_va_T_1 = ~bad_va_additionalPgLevels; // @[package.scala:163:13] wire _bad_va_T_2 = bad_va_maskedVAddr == 49'h0; // @[TLB.scala:559:43, :560:51] wire _bad_va_T_3 = bad_va_maskedVAddr == 49'h1FFC000000000; // @[TLB.scala:559:43, :560:86] wire _bad_va_T_4 = _bad_va_T_3; // @[TLB.scala:560:{71,86}] wire _bad_va_T_5 = _bad_va_T_2 | _bad_va_T_4; // @[TLB.scala:560:{51,59,71}] wire _bad_va_T_6 = ~_bad_va_T_5; // @[TLB.scala:560:{37,59}] wire _bad_va_T_7 = _bad_va_T_1 & _bad_va_T_6; // @[TLB.scala:560:{26,34,37}] wire [48:0] 
bad_va_maskedVAddr_1 = io_req_bits_vaddr_0 & 49'h1800000000000; // @[TLB.scala:318:7, :559:43] wire _bad_va_T_9 = bad_va_maskedVAddr_1 == 49'h0; // @[TLB.scala:559:43, :560:51] wire _bad_va_T_10 = bad_va_maskedVAddr_1 == 49'h1800000000000; // @[TLB.scala:559:43, :560:86] wire _bad_va_T_11 = _bad_va_T_10; // @[TLB.scala:560:{71,86}] wire _bad_va_T_12 = _bad_va_T_9 | _bad_va_T_11; // @[TLB.scala:560:{51,59,71}] wire _bad_va_T_13 = ~_bad_va_T_12; // @[TLB.scala:560:{37,59}] wire _bad_va_T_14 = _bad_va_T_8 & _bad_va_T_13; // @[TLB.scala:560:{26,34,37}] wire _bad_va_T_15 = _bad_va_T_7 | _bad_va_T_14; // @[package.scala:81:59] wire bad_va = _bad_va_T & _bad_va_T_15; // @[package.scala:81:59] wire _GEN_65 = io_req_bits_cmd_0 == 5'h6; // @[package.scala:16:47] wire _cmd_lrsc_T; // @[package.scala:16:47] assign _cmd_lrsc_T = _GEN_65; // @[package.scala:16:47] wire _cmd_read_T_2; // @[package.scala:16:47] assign _cmd_read_T_2 = _GEN_65; // @[package.scala:16:47] wire _GEN_66 = io_req_bits_cmd_0 == 5'h7; // @[package.scala:16:47] wire _cmd_lrsc_T_1; // @[package.scala:16:47] assign _cmd_lrsc_T_1 = _GEN_66; // @[package.scala:16:47] wire _cmd_read_T_3; // @[package.scala:16:47] assign _cmd_read_T_3 = _GEN_66; // @[package.scala:16:47] wire _cmd_write_T_3; // @[Consts.scala:90:66] assign _cmd_write_T_3 = _GEN_66; // @[package.scala:16:47] wire _cmd_lrsc_T_2 = _cmd_lrsc_T | _cmd_lrsc_T_1; // @[package.scala:16:47, :81:59] wire cmd_lrsc = _cmd_lrsc_T_2; // @[package.scala:81:59] wire _GEN_67 = io_req_bits_cmd_0 == 5'h4; // @[package.scala:16:47] wire _cmd_amo_logical_T; // @[package.scala:16:47] assign _cmd_amo_logical_T = _GEN_67; // @[package.scala:16:47] wire _cmd_read_T_7; // @[package.scala:16:47] assign _cmd_read_T_7 = _GEN_67; // @[package.scala:16:47] wire _cmd_write_T_5; // @[package.scala:16:47] assign _cmd_write_T_5 = _GEN_67; // @[package.scala:16:47] wire _GEN_68 = io_req_bits_cmd_0 == 5'h9; // @[package.scala:16:47] wire _cmd_amo_logical_T_1; // @[package.scala:16:47] assign _cmd_amo_logical_T_1 = _GEN_68; // @[package.scala:16:47] wire _cmd_read_T_8; // @[package.scala:16:47] assign _cmd_read_T_8 = _GEN_68; // @[package.scala:16:47] wire _cmd_write_T_6; // @[package.scala:16:47] assign _cmd_write_T_6 = _GEN_68; // @[package.scala:16:47] wire _GEN_69 = io_req_bits_cmd_0 == 5'hA; // @[package.scala:16:47] wire _cmd_amo_logical_T_2; // @[package.scala:16:47] assign _cmd_amo_logical_T_2 = _GEN_69; // @[package.scala:16:47] wire _cmd_read_T_9; // @[package.scala:16:47] assign _cmd_read_T_9 = _GEN_69; // @[package.scala:16:47] wire _cmd_write_T_7; // @[package.scala:16:47] assign _cmd_write_T_7 = _GEN_69; // @[package.scala:16:47] wire _GEN_70 = io_req_bits_cmd_0 == 5'hB; // @[package.scala:16:47] wire _cmd_amo_logical_T_3; // @[package.scala:16:47] assign _cmd_amo_logical_T_3 = _GEN_70; // @[package.scala:16:47] wire _cmd_read_T_10; // @[package.scala:16:47] assign _cmd_read_T_10 = _GEN_70; // @[package.scala:16:47] wire _cmd_write_T_8; // @[package.scala:16:47] assign _cmd_write_T_8 = _GEN_70; // @[package.scala:16:47] wire _cmd_amo_logical_T_4 = _cmd_amo_logical_T | _cmd_amo_logical_T_1; // @[package.scala:16:47, :81:59] wire _cmd_amo_logical_T_5 = _cmd_amo_logical_T_4 | _cmd_amo_logical_T_2; // @[package.scala:16:47, :81:59] wire _cmd_amo_logical_T_6 = _cmd_amo_logical_T_5 | _cmd_amo_logical_T_3; // @[package.scala:16:47, :81:59] wire cmd_amo_logical = _cmd_amo_logical_T_6; // @[package.scala:81:59] wire _GEN_71 = io_req_bits_cmd_0 == 5'h8; // @[package.scala:16:47] wire 
_cmd_amo_arithmetic_T; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T = _GEN_71; // @[package.scala:16:47] wire _cmd_read_T_14; // @[package.scala:16:47] assign _cmd_read_T_14 = _GEN_71; // @[package.scala:16:47] wire _cmd_write_T_12; // @[package.scala:16:47] assign _cmd_write_T_12 = _GEN_71; // @[package.scala:16:47] wire _GEN_72 = io_req_bits_cmd_0 == 5'hC; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_1; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T_1 = _GEN_72; // @[package.scala:16:47] wire _cmd_read_T_15; // @[package.scala:16:47] assign _cmd_read_T_15 = _GEN_72; // @[package.scala:16:47] wire _cmd_write_T_13; // @[package.scala:16:47] assign _cmd_write_T_13 = _GEN_72; // @[package.scala:16:47] wire _GEN_73 = io_req_bits_cmd_0 == 5'hD; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_2; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T_2 = _GEN_73; // @[package.scala:16:47] wire _cmd_read_T_16; // @[package.scala:16:47] assign _cmd_read_T_16 = _GEN_73; // @[package.scala:16:47] wire _cmd_write_T_14; // @[package.scala:16:47] assign _cmd_write_T_14 = _GEN_73; // @[package.scala:16:47] wire _GEN_74 = io_req_bits_cmd_0 == 5'hE; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_3; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T_3 = _GEN_74; // @[package.scala:16:47] wire _cmd_read_T_17; // @[package.scala:16:47] assign _cmd_read_T_17 = _GEN_74; // @[package.scala:16:47] wire _cmd_write_T_15; // @[package.scala:16:47] assign _cmd_write_T_15 = _GEN_74; // @[package.scala:16:47] wire _GEN_75 = io_req_bits_cmd_0 == 5'hF; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_4; // @[package.scala:16:47] assign _cmd_amo_arithmetic_T_4 = _GEN_75; // @[package.scala:16:47] wire _cmd_read_T_18; // @[package.scala:16:47] assign _cmd_read_T_18 = _GEN_75; // @[package.scala:16:47] wire _cmd_write_T_16; // @[package.scala:16:47] assign _cmd_write_T_16 = _GEN_75; // @[package.scala:16:47] wire _cmd_amo_arithmetic_T_5 = _cmd_amo_arithmetic_T | _cmd_amo_arithmetic_T_1; // @[package.scala:16:47, :81:59] wire _cmd_amo_arithmetic_T_6 = _cmd_amo_arithmetic_T_5 | _cmd_amo_arithmetic_T_2; // @[package.scala:16:47, :81:59] wire _cmd_amo_arithmetic_T_7 = _cmd_amo_arithmetic_T_6 | _cmd_amo_arithmetic_T_3; // @[package.scala:16:47, :81:59] wire _cmd_amo_arithmetic_T_8 = _cmd_amo_arithmetic_T_7 | _cmd_amo_arithmetic_T_4; // @[package.scala:16:47, :81:59] wire cmd_amo_arithmetic = _cmd_amo_arithmetic_T_8; // @[package.scala:81:59] wire _GEN_76 = io_req_bits_cmd_0 == 5'h11; // @[TLB.scala:318:7, :573:41] wire cmd_put_partial; // @[TLB.scala:573:41] assign cmd_put_partial = _GEN_76; // @[TLB.scala:573:41] wire _cmd_write_T_1; // @[Consts.scala:90:49] assign _cmd_write_T_1 = _GEN_76; // @[TLB.scala:573:41] wire _cmd_read_T = io_req_bits_cmd_0 == 5'h0; // @[package.scala:16:47] wire _GEN_77 = io_req_bits_cmd_0 == 5'h10; // @[package.scala:16:47] wire _cmd_read_T_1; // @[package.scala:16:47] assign _cmd_read_T_1 = _GEN_77; // @[package.scala:16:47] wire _cmd_readx_T; // @[TLB.scala:575:56] assign _cmd_readx_T = _GEN_77; // @[package.scala:16:47] wire _cmd_read_T_4 = _cmd_read_T | _cmd_read_T_1; // @[package.scala:16:47, :81:59] wire _cmd_read_T_5 = _cmd_read_T_4 | _cmd_read_T_2; // @[package.scala:16:47, :81:59] wire _cmd_read_T_6 = _cmd_read_T_5 | _cmd_read_T_3; // @[package.scala:16:47, :81:59] wire _cmd_read_T_11 = _cmd_read_T_7 | _cmd_read_T_8; // @[package.scala:16:47, :81:59] wire _cmd_read_T_12 = _cmd_read_T_11 | _cmd_read_T_9; // @[package.scala:16:47, :81:59] wire 
_cmd_read_T_13 = _cmd_read_T_12 | _cmd_read_T_10; // @[package.scala:16:47, :81:59] wire _cmd_read_T_19 = _cmd_read_T_14 | _cmd_read_T_15; // @[package.scala:16:47, :81:59] wire _cmd_read_T_20 = _cmd_read_T_19 | _cmd_read_T_16; // @[package.scala:16:47, :81:59] wire _cmd_read_T_21 = _cmd_read_T_20 | _cmd_read_T_17; // @[package.scala:16:47, :81:59] wire _cmd_read_T_22 = _cmd_read_T_21 | _cmd_read_T_18; // @[package.scala:16:47, :81:59] wire _cmd_read_T_23 = _cmd_read_T_13 | _cmd_read_T_22; // @[package.scala:81:59] wire cmd_read = _cmd_read_T_6 | _cmd_read_T_23; // @[package.scala:81:59] wire _cmd_write_T = io_req_bits_cmd_0 == 5'h1; // @[TLB.scala:318:7] wire _cmd_write_T_2 = _cmd_write_T | _cmd_write_T_1; // @[Consts.scala:90:{32,42,49}] wire _cmd_write_T_4 = _cmd_write_T_2 | _cmd_write_T_3; // @[Consts.scala:90:{42,59,66}] wire _cmd_write_T_9 = _cmd_write_T_5 | _cmd_write_T_6; // @[package.scala:16:47, :81:59] wire _cmd_write_T_10 = _cmd_write_T_9 | _cmd_write_T_7; // @[package.scala:16:47, :81:59] wire _cmd_write_T_11 = _cmd_write_T_10 | _cmd_write_T_8; // @[package.scala:16:47, :81:59] wire _cmd_write_T_17 = _cmd_write_T_12 | _cmd_write_T_13; // @[package.scala:16:47, :81:59] wire _cmd_write_T_18 = _cmd_write_T_17 | _cmd_write_T_14; // @[package.scala:16:47, :81:59] wire _cmd_write_T_19 = _cmd_write_T_18 | _cmd_write_T_15; // @[package.scala:16:47, :81:59] wire _cmd_write_T_20 = _cmd_write_T_19 | _cmd_write_T_16; // @[package.scala:16:47, :81:59] wire _cmd_write_T_21 = _cmd_write_T_11 | _cmd_write_T_20; // @[package.scala:81:59] wire cmd_write = _cmd_write_T_4 | _cmd_write_T_21; // @[Consts.scala:87:44, :90:{59,76}] wire _cmd_write_perms_T = io_req_bits_cmd_0 == 5'h5; // @[package.scala:16:47] wire _cmd_write_perms_T_1 = io_req_bits_cmd_0 == 5'h17; // @[package.scala:16:47] wire _cmd_write_perms_T_2 = _cmd_write_perms_T | _cmd_write_perms_T_1; // @[package.scala:16:47, :81:59] wire cmd_write_perms = cmd_write | _cmd_write_perms_T_2; // @[package.scala:81:59] wire [13:0] _ae_array_T = misaligned ? eff_array : 14'h0; // @[TLB.scala:535:22, :550:77, :582:8] wire [13:0] _ae_array_T_1 = ~lrscAllowed; // @[TLB.scala:580:24, :583:19] wire [13:0] _ae_array_T_2 = cmd_lrsc ? _ae_array_T_1 : 14'h0; // @[TLB.scala:570:33, :583:{8,19}] wire [13:0] ae_array = _ae_array_T | _ae_array_T_2; // @[TLB.scala:582:{8,37}, :583:8] wire [13:0] _ae_ld_array_T = ~pr_array; // @[TLB.scala:529:87, :586:46] wire [13:0] _ae_ld_array_T_1 = ae_array | _ae_ld_array_T; // @[TLB.scala:582:37, :586:{44,46}] wire [13:0] ae_ld_array = cmd_read ? _ae_ld_array_T_1 : 14'h0; // @[TLB.scala:586:{24,44}] wire [13:0] _ae_st_array_T = ~pw_array; // @[TLB.scala:531:87, :588:37] wire [13:0] _ae_st_array_T_1 = ae_array | _ae_st_array_T; // @[TLB.scala:582:37, :588:{35,37}] wire [13:0] _ae_st_array_T_2 = cmd_write_perms ? _ae_st_array_T_1 : 14'h0; // @[TLB.scala:577:35, :588:{8,35}] wire [13:0] _ae_st_array_T_3 = ~ppp_array_if_cached; // @[TLB.scala:544:39, :589:26] wire [13:0] _ae_st_array_T_4 = cmd_put_partial ? _ae_st_array_T_3 : 14'h0; // @[TLB.scala:573:41, :589:{8,26}] wire [13:0] _ae_st_array_T_5 = _ae_st_array_T_2 | _ae_st_array_T_4; // @[TLB.scala:588:{8,53}, :589:8] wire [13:0] _ae_st_array_T_6 = ~pal_array_if_cached; // @[TLB.scala:546:39, :590:26] wire [13:0] _ae_st_array_T_7 = cmd_amo_logical ? 
_ae_st_array_T_6 : 14'h0; // @[TLB.scala:571:40, :590:{8,26}] wire [13:0] _ae_st_array_T_8 = _ae_st_array_T_5 | _ae_st_array_T_7; // @[TLB.scala:588:53, :589:53, :590:8] wire [13:0] _ae_st_array_T_9 = ~paa_array_if_cached; // @[TLB.scala:545:39, :591:29] wire [13:0] _ae_st_array_T_10 = cmd_amo_arithmetic ? _ae_st_array_T_9 : 14'h0; // @[TLB.scala:572:43, :591:{8,29}] wire [13:0] ae_st_array = _ae_st_array_T_8 | _ae_st_array_T_10; // @[TLB.scala:589:53, :590:53, :591:8] wire [13:0] _must_alloc_array_T = ~ppp_array; // @[TLB.scala:539:22, :593:26] wire [13:0] _must_alloc_array_T_1 = cmd_put_partial ? _must_alloc_array_T : 14'h0; // @[TLB.scala:573:41, :593:{8,26}] wire [13:0] _must_alloc_array_T_2 = ~pal_array; // @[TLB.scala:543:22, :594:26] wire [13:0] _must_alloc_array_T_3 = cmd_amo_logical ? _must_alloc_array_T_2 : 14'h0; // @[TLB.scala:571:40, :594:{8,26}] wire [13:0] _must_alloc_array_T_4 = _must_alloc_array_T_1 | _must_alloc_array_T_3; // @[TLB.scala:593:{8,43}, :594:8] wire [13:0] _must_alloc_array_T_5 = ~paa_array; // @[TLB.scala:541:22, :595:29] wire [13:0] _must_alloc_array_T_6 = cmd_amo_arithmetic ? _must_alloc_array_T_5 : 14'h0; // @[TLB.scala:572:43, :595:{8,29}] wire [13:0] _must_alloc_array_T_7 = _must_alloc_array_T_4 | _must_alloc_array_T_6; // @[TLB.scala:593:43, :594:43, :595:8] wire [13:0] _must_alloc_array_T_9 = {14{cmd_lrsc}}; // @[TLB.scala:570:33, :596:8] wire [13:0] must_alloc_array = _must_alloc_array_T_7 | _must_alloc_array_T_9; // @[TLB.scala:594:43, :595:46, :596:8] wire [13:0] _pf_ld_array_T_1 = ~_pf_ld_array_T; // @[TLB.scala:597:{37,41}] wire [13:0] _pf_ld_array_T_2 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73] wire [13:0] _pf_ld_array_T_3 = _pf_ld_array_T_1 & _pf_ld_array_T_2; // @[TLB.scala:597:{37,71,73}] wire [13:0] _pf_ld_array_T_4 = _pf_ld_array_T_3 | ptw_pf_array; // @[TLB.scala:508:25, :597:{71,88}] wire [13:0] _pf_ld_array_T_5 = ~ptw_gf_array; // @[TLB.scala:509:25, :597:106] wire [13:0] _pf_ld_array_T_6 = _pf_ld_array_T_4 & _pf_ld_array_T_5; // @[TLB.scala:597:{88,104,106}] wire [13:0] pf_ld_array = cmd_read ? _pf_ld_array_T_6 : 14'h0; // @[TLB.scala:597:{24,104}] wire [13:0] _pf_st_array_T = ~w_array; // @[TLB.scala:521:20, :598:44] wire [13:0] _pf_st_array_T_1 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :598:55] wire [13:0] _pf_st_array_T_2 = _pf_st_array_T & _pf_st_array_T_1; // @[TLB.scala:598:{44,53,55}] wire [13:0] _pf_st_array_T_3 = _pf_st_array_T_2 | ptw_pf_array; // @[TLB.scala:508:25, :598:{53,70}] wire [13:0] _pf_st_array_T_4 = ~ptw_gf_array; // @[TLB.scala:509:25, :597:106, :598:88] wire [13:0] _pf_st_array_T_5 = _pf_st_array_T_3 & _pf_st_array_T_4; // @[TLB.scala:598:{70,86,88}] wire [13:0] pf_st_array = cmd_write_perms ? 
_pf_st_array_T_5 : 14'h0; // @[TLB.scala:577:35, :598:{24,86}] wire [13:0] _pf_inst_array_T = ~x_array; // @[TLB.scala:522:20, :599:25] wire [13:0] _pf_inst_array_T_1 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :599:36] wire [13:0] _pf_inst_array_T_2 = _pf_inst_array_T & _pf_inst_array_T_1; // @[TLB.scala:599:{25,34,36}] wire [13:0] _pf_inst_array_T_3 = _pf_inst_array_T_2 | ptw_pf_array; // @[TLB.scala:508:25, :599:{34,51}] wire [13:0] _pf_inst_array_T_4 = ~ptw_gf_array; // @[TLB.scala:509:25, :597:106, :599:69] wire [13:0] pf_inst_array = _pf_inst_array_T_3 & _pf_inst_array_T_4; // @[TLB.scala:599:{51,67,69}] wire [13:0] _gf_ld_array_T_4 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :600:100] wire [13:0] _gf_ld_array_T_5 = _gf_ld_array_T_3 & _gf_ld_array_T_4; // @[TLB.scala:600:{82,98,100}] wire [13:0] _gf_st_array_T_3 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :601:81] wire [13:0] _gf_st_array_T_4 = _gf_st_array_T_2 & _gf_st_array_T_3; // @[TLB.scala:601:{63,79,81}] wire [13:0] _gf_inst_array_T_2 = ~ptw_ae_array; // @[TLB.scala:506:25, :597:73, :602:64] wire [13:0] _gf_inst_array_T_3 = _gf_inst_array_T_1 & _gf_inst_array_T_2; // @[TLB.scala:602:{46,62,64}] wire _gpa_hits_hit_mask_T = r_gpa_vpn == vpn; // @[TLB.scala:335:30, :364:22, :606:73] wire _gpa_hits_hit_mask_T_1 = r_gpa_valid & _gpa_hits_hit_mask_T; // @[TLB.scala:362:24, :606:{60,73}] wire [11:0] _gpa_hits_hit_mask_T_2 = {12{_gpa_hits_hit_mask_T_1}}; // @[TLB.scala:606:{24,60}] wire tlb_hit_if_not_gpa_miss = |real_hits; // @[package.scala:45:27] wire tlb_hit = |_tlb_hit_T; // @[TLB.scala:611:{28,40}] wire _tlb_miss_T_2 = ~bad_va; // @[TLB.scala:568:34, :613:56] wire _tlb_miss_T_3 = _tlb_miss_T_1 & _tlb_miss_T_2; // @[TLB.scala:613:{29,53,56}] wire _tlb_miss_T_4 = ~tlb_hit; // @[TLB.scala:611:40, :613:67] wire tlb_miss = _tlb_miss_T_3 & _tlb_miss_T_4; // @[TLB.scala:613:{53,64,67}] reg [6:0] state_vec_0; // @[Replacement.scala:305:17] reg [2:0] state_reg_1; // @[Replacement.scala:168:70] wire [1:0] _GEN_78 = {sector_hits_1, sector_hits_0}; // @[OneHot.scala:21:45] wire [1:0] lo_lo; // @[OneHot.scala:21:45] assign lo_lo = _GEN_78; // @[OneHot.scala:21:45] wire [1:0] r_sectored_hit_bits_lo_lo; // @[OneHot.scala:21:45] assign r_sectored_hit_bits_lo_lo = _GEN_78; // @[OneHot.scala:21:45] wire [1:0] _GEN_79 = {sector_hits_3, sector_hits_2}; // @[OneHot.scala:21:45] wire [1:0] lo_hi; // @[OneHot.scala:21:45] assign lo_hi = _GEN_79; // @[OneHot.scala:21:45] wire [1:0] r_sectored_hit_bits_lo_hi; // @[OneHot.scala:21:45] assign r_sectored_hit_bits_lo_hi = _GEN_79; // @[OneHot.scala:21:45] wire [3:0] lo = {lo_hi, lo_lo}; // @[OneHot.scala:21:45] wire [3:0] lo_1 = lo; // @[OneHot.scala:21:45, :31:18] wire [1:0] _GEN_80 = {sector_hits_5, sector_hits_4}; // @[OneHot.scala:21:45] wire [1:0] hi_lo; // @[OneHot.scala:21:45] assign hi_lo = _GEN_80; // @[OneHot.scala:21:45] wire [1:0] r_sectored_hit_bits_hi_lo; // @[OneHot.scala:21:45] assign r_sectored_hit_bits_hi_lo = _GEN_80; // @[OneHot.scala:21:45] wire [1:0] _GEN_81 = {sector_hits_7, sector_hits_6}; // @[OneHot.scala:21:45] wire [1:0] hi_hi; // @[OneHot.scala:21:45] assign hi_hi = _GEN_81; // @[OneHot.scala:21:45] wire [1:0] r_sectored_hit_bits_hi_hi; // @[OneHot.scala:21:45] assign r_sectored_hit_bits_hi_hi = _GEN_81; // @[OneHot.scala:21:45] wire [3:0] hi = {hi_hi, hi_lo}; // @[OneHot.scala:21:45] wire [3:0] hi_1 = hi; // @[OneHot.scala:21:45, :30:18] wire [3:0] _T_33 = hi_1 | lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] hi_2 = _T_33[3:2]; // @[OneHot.scala:30:18, 
:32:28] wire [1:0] lo_2 = _T_33[1:0]; // @[OneHot.scala:31:18, :32:28] wire [2:0] state_vec_0_touch_way_sized = {|hi_1, |hi_2, hi_2[1] | lo_2[1]}; // @[OneHot.scala:30:18, :31:18, :32:{10,14,28}] wire _state_vec_0_set_left_older_T = state_vec_0_touch_way_sized[2]; // @[package.scala:163:13] wire state_vec_0_set_left_older = ~_state_vec_0_set_left_older_T; // @[Replacement.scala:196:{33,43}] wire [2:0] state_vec_0_left_subtree_state = state_vec_0[5:3]; // @[package.scala:163:13] wire [2:0] r_sectored_repl_addr_left_subtree_state = state_vec_0[5:3]; // @[package.scala:163:13] wire [2:0] state_vec_0_right_subtree_state = state_vec_0[2:0]; // @[Replacement.scala:198:38, :305:17] wire [2:0] r_sectored_repl_addr_right_subtree_state = state_vec_0[2:0]; // @[Replacement.scala:198:38, :245:38, :305:17] wire [1:0] _state_vec_0_T = state_vec_0_touch_way_sized[1:0]; // @[package.scala:163:13] wire [1:0] _state_vec_0_T_11 = state_vec_0_touch_way_sized[1:0]; // @[package.scala:163:13] wire _state_vec_0_set_left_older_T_1 = _state_vec_0_T[1]; // @[package.scala:163:13] wire state_vec_0_set_left_older_1 = ~_state_vec_0_set_left_older_T_1; // @[Replacement.scala:196:{33,43}] wire state_vec_0_left_subtree_state_1 = state_vec_0_left_subtree_state[1]; // @[package.scala:163:13] wire state_vec_0_right_subtree_state_1 = state_vec_0_left_subtree_state[0]; // @[package.scala:163:13] wire _state_vec_0_T_1 = _state_vec_0_T[0]; // @[package.scala:163:13] wire _state_vec_0_T_5 = _state_vec_0_T[0]; // @[package.scala:163:13] wire _state_vec_0_T_2 = _state_vec_0_T_1; // @[package.scala:163:13] wire _state_vec_0_T_3 = ~_state_vec_0_T_2; // @[Replacement.scala:218:{7,17}] wire _state_vec_0_T_4 = state_vec_0_set_left_older_1 ? state_vec_0_left_subtree_state_1 : _state_vec_0_T_3; // @[package.scala:163:13] wire _state_vec_0_T_6 = _state_vec_0_T_5; // @[Replacement.scala:207:62, :218:17] wire _state_vec_0_T_7 = ~_state_vec_0_T_6; // @[Replacement.scala:218:{7,17}] wire _state_vec_0_T_8 = state_vec_0_set_left_older_1 ? _state_vec_0_T_7 : state_vec_0_right_subtree_state_1; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7] wire [1:0] state_vec_0_hi = {state_vec_0_set_left_older_1, _state_vec_0_T_4}; // @[Replacement.scala:196:33, :202:12, :203:16] wire [2:0] _state_vec_0_T_9 = {state_vec_0_hi, _state_vec_0_T_8}; // @[Replacement.scala:202:12, :206:16] wire [2:0] _state_vec_0_T_10 = state_vec_0_set_left_older ? state_vec_0_left_subtree_state : _state_vec_0_T_9; // @[package.scala:163:13] wire _state_vec_0_set_left_older_T_2 = _state_vec_0_T_11[1]; // @[Replacement.scala:196:43, :207:62] wire state_vec_0_set_left_older_2 = ~_state_vec_0_set_left_older_T_2; // @[Replacement.scala:196:{33,43}] wire state_vec_0_left_subtree_state_2 = state_vec_0_right_subtree_state[1]; // @[package.scala:163:13] wire state_vec_0_right_subtree_state_2 = state_vec_0_right_subtree_state[0]; // @[Replacement.scala:198:38] wire _state_vec_0_T_12 = _state_vec_0_T_11[0]; // @[package.scala:163:13] wire _state_vec_0_T_16 = _state_vec_0_T_11[0]; // @[package.scala:163:13] wire _state_vec_0_T_13 = _state_vec_0_T_12; // @[package.scala:163:13] wire _state_vec_0_T_14 = ~_state_vec_0_T_13; // @[Replacement.scala:218:{7,17}] wire _state_vec_0_T_15 = state_vec_0_set_left_older_2 ? 
state_vec_0_left_subtree_state_2 : _state_vec_0_T_14; // @[package.scala:163:13] wire _state_vec_0_T_17 = _state_vec_0_T_16; // @[Replacement.scala:207:62, :218:17] wire _state_vec_0_T_18 = ~_state_vec_0_T_17; // @[Replacement.scala:218:{7,17}] wire _state_vec_0_T_19 = state_vec_0_set_left_older_2 ? _state_vec_0_T_18 : state_vec_0_right_subtree_state_2; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7] wire [1:0] state_vec_0_hi_1 = {state_vec_0_set_left_older_2, _state_vec_0_T_15}; // @[Replacement.scala:196:33, :202:12, :203:16] wire [2:0] _state_vec_0_T_20 = {state_vec_0_hi_1, _state_vec_0_T_19}; // @[Replacement.scala:202:12, :206:16] wire [2:0] _state_vec_0_T_21 = state_vec_0_set_left_older ? _state_vec_0_T_20 : state_vec_0_right_subtree_state; // @[Replacement.scala:196:33, :198:38, :202:12, :206:16] wire [3:0] state_vec_0_hi_2 = {state_vec_0_set_left_older, _state_vec_0_T_10}; // @[Replacement.scala:196:33, :202:12, :203:16] wire [6:0] _state_vec_0_T_22 = {state_vec_0_hi_2, _state_vec_0_T_21}; // @[Replacement.scala:202:12, :206:16] wire [1:0] _GEN_82 = {superpage_hits_1, superpage_hits_0}; // @[OneHot.scala:21:45] wire [1:0] lo_3; // @[OneHot.scala:21:45] assign lo_3 = _GEN_82; // @[OneHot.scala:21:45] wire [1:0] r_superpage_hit_bits_lo; // @[OneHot.scala:21:45] assign r_superpage_hit_bits_lo = _GEN_82; // @[OneHot.scala:21:45] wire [1:0] lo_4 = lo_3; // @[OneHot.scala:21:45, :31:18] wire [1:0] _GEN_83 = {superpage_hits_3, superpage_hits_2}; // @[OneHot.scala:21:45] wire [1:0] hi_3; // @[OneHot.scala:21:45] assign hi_3 = _GEN_83; // @[OneHot.scala:21:45] wire [1:0] r_superpage_hit_bits_hi; // @[OneHot.scala:21:45] assign r_superpage_hit_bits_hi = _GEN_83; // @[OneHot.scala:21:45] wire [1:0] hi_4 = hi_3; // @[OneHot.scala:21:45, :30:18] wire [1:0] state_reg_touch_way_sized = {|hi_4, hi_4[1] | lo_4[1]}; // @[OneHot.scala:30:18, :31:18, :32:{10,14,28}] wire _state_reg_set_left_older_T = state_reg_touch_way_sized[1]; // @[package.scala:163:13] wire state_reg_set_left_older = ~_state_reg_set_left_older_T; // @[Replacement.scala:196:{33,43}] wire state_reg_left_subtree_state = state_reg_1[1]; // @[package.scala:163:13] wire r_superpage_repl_addr_left_subtree_state = state_reg_1[1]; // @[package.scala:163:13] wire state_reg_right_subtree_state = state_reg_1[0]; // @[Replacement.scala:168:70, :198:38] wire r_superpage_repl_addr_right_subtree_state = state_reg_1[0]; // @[Replacement.scala:168:70, :198:38, :245:38] wire _state_reg_T = state_reg_touch_way_sized[0]; // @[package.scala:163:13] wire _state_reg_T_4 = state_reg_touch_way_sized[0]; // @[package.scala:163:13] wire _state_reg_T_1 = _state_reg_T; // @[package.scala:163:13] wire _state_reg_T_2 = ~_state_reg_T_1; // @[Replacement.scala:218:{7,17}] wire _state_reg_T_3 = state_reg_set_left_older ? state_reg_left_subtree_state : _state_reg_T_2; // @[package.scala:163:13] wire _state_reg_T_5 = _state_reg_T_4; // @[Replacement.scala:207:62, :218:17] wire _state_reg_T_6 = ~_state_reg_T_5; // @[Replacement.scala:218:{7,17}] wire _state_reg_T_7 = state_reg_set_left_older ? 
_state_reg_T_6 : state_reg_right_subtree_state; // @[Replacement.scala:196:33, :198:38, :206:16, :218:7] wire [1:0] state_reg_hi = {state_reg_set_left_older, _state_reg_T_3}; // @[Replacement.scala:196:33, :202:12, :203:16] wire [2:0] _state_reg_T_8 = {state_reg_hi, _state_reg_T_7}; // @[Replacement.scala:202:12, :206:16] wire [5:0] _multipleHits_T = real_hits[5:0]; // @[package.scala:45:27] wire [2:0] _multipleHits_T_1 = _multipleHits_T[2:0]; // @[Misc.scala:181:37] wire _multipleHits_T_2 = _multipleHits_T_1[0]; // @[Misc.scala:181:37] wire multipleHits_leftOne = _multipleHits_T_2; // @[Misc.scala:178:18, :181:37] wire [1:0] _multipleHits_T_3 = _multipleHits_T_1[2:1]; // @[Misc.scala:181:37, :182:39] wire _multipleHits_T_4 = _multipleHits_T_3[0]; // @[Misc.scala:181:37, :182:39] wire multipleHits_leftOne_1 = _multipleHits_T_4; // @[Misc.scala:178:18, :181:37] wire _multipleHits_T_5 = _multipleHits_T_3[1]; // @[Misc.scala:182:39] wire multipleHits_rightOne = _multipleHits_T_5; // @[Misc.scala:178:18, :182:39] wire multipleHits_rightOne_1 = multipleHits_leftOne_1 | multipleHits_rightOne; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_7 = multipleHits_leftOne_1 & multipleHits_rightOne; // @[Misc.scala:178:18, :183:61] wire multipleHits_rightTwo = _multipleHits_T_7; // @[Misc.scala:183:{49,61}] wire _multipleHits_T_8 = multipleHits_rightTwo; // @[Misc.scala:183:{37,49}] wire multipleHits_leftOne_2 = multipleHits_leftOne | multipleHits_rightOne_1; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_9 = multipleHits_leftOne & multipleHits_rightOne_1; // @[Misc.scala:178:18, :183:{16,61}] wire multipleHits_leftTwo = _multipleHits_T_8 | _multipleHits_T_9; // @[Misc.scala:183:{37,49,61}] wire [2:0] _multipleHits_T_10 = _multipleHits_T[5:3]; // @[Misc.scala:181:37, :182:39] wire _multipleHits_T_11 = _multipleHits_T_10[0]; // @[Misc.scala:181:37, :182:39] wire multipleHits_leftOne_3 = _multipleHits_T_11; // @[Misc.scala:178:18, :181:37] wire [1:0] _multipleHits_T_12 = _multipleHits_T_10[2:1]; // @[Misc.scala:182:39] wire _multipleHits_T_13 = _multipleHits_T_12[0]; // @[Misc.scala:181:37, :182:39] wire multipleHits_leftOne_4 = _multipleHits_T_13; // @[Misc.scala:178:18, :181:37] wire _multipleHits_T_14 = _multipleHits_T_12[1]; // @[Misc.scala:182:39] wire multipleHits_rightOne_2 = _multipleHits_T_14; // @[Misc.scala:178:18, :182:39] wire multipleHits_rightOne_3 = multipleHits_leftOne_4 | multipleHits_rightOne_2; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_16 = multipleHits_leftOne_4 & multipleHits_rightOne_2; // @[Misc.scala:178:18, :183:61] wire multipleHits_rightTwo_1 = _multipleHits_T_16; // @[Misc.scala:183:{49,61}] wire _multipleHits_T_17 = multipleHits_rightTwo_1; // @[Misc.scala:183:{37,49}] wire multipleHits_rightOne_4 = multipleHits_leftOne_3 | multipleHits_rightOne_3; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_18 = multipleHits_leftOne_3 & multipleHits_rightOne_3; // @[Misc.scala:178:18, :183:{16,61}] wire multipleHits_rightTwo_2 = _multipleHits_T_17 | _multipleHits_T_18; // @[Misc.scala:183:{37,49,61}] wire multipleHits_leftOne_5 = multipleHits_leftOne_2 | multipleHits_rightOne_4; // @[Misc.scala:183:16] wire _multipleHits_T_19 = multipleHits_leftTwo | multipleHits_rightTwo_2; // @[Misc.scala:183:{37,49}] wire _multipleHits_T_20 = multipleHits_leftOne_2 & multipleHits_rightOne_4; // @[Misc.scala:183:{16,61}] wire multipleHits_leftTwo_1 = _multipleHits_T_19 | _multipleHits_T_20; // @[Misc.scala:183:{37,49,61}] wire [6:0] _multipleHits_T_21 = real_hits[12:6]; // 
@[package.scala:45:27] wire [2:0] _multipleHits_T_22 = _multipleHits_T_21[2:0]; // @[Misc.scala:181:37, :182:39] wire _multipleHits_T_23 = _multipleHits_T_22[0]; // @[Misc.scala:181:37] wire multipleHits_leftOne_6 = _multipleHits_T_23; // @[Misc.scala:178:18, :181:37] wire [1:0] _multipleHits_T_24 = _multipleHits_T_22[2:1]; // @[Misc.scala:181:37, :182:39] wire _multipleHits_T_25 = _multipleHits_T_24[0]; // @[Misc.scala:181:37, :182:39] wire multipleHits_leftOne_7 = _multipleHits_T_25; // @[Misc.scala:178:18, :181:37] wire _multipleHits_T_26 = _multipleHits_T_24[1]; // @[Misc.scala:182:39] wire multipleHits_rightOne_5 = _multipleHits_T_26; // @[Misc.scala:178:18, :182:39] wire multipleHits_rightOne_6 = multipleHits_leftOne_7 | multipleHits_rightOne_5; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_28 = multipleHits_leftOne_7 & multipleHits_rightOne_5; // @[Misc.scala:178:18, :183:61] wire multipleHits_rightTwo_3 = _multipleHits_T_28; // @[Misc.scala:183:{49,61}] wire _multipleHits_T_29 = multipleHits_rightTwo_3; // @[Misc.scala:183:{37,49}] wire multipleHits_leftOne_8 = multipleHits_leftOne_6 | multipleHits_rightOne_6; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_30 = multipleHits_leftOne_6 & multipleHits_rightOne_6; // @[Misc.scala:178:18, :183:{16,61}] wire multipleHits_leftTwo_2 = _multipleHits_T_29 | _multipleHits_T_30; // @[Misc.scala:183:{37,49,61}] wire [3:0] _multipleHits_T_31 = _multipleHits_T_21[6:3]; // @[Misc.scala:182:39] wire [1:0] _multipleHits_T_32 = _multipleHits_T_31[1:0]; // @[Misc.scala:181:37, :182:39] wire _multipleHits_T_33 = _multipleHits_T_32[0]; // @[Misc.scala:181:37] wire multipleHits_leftOne_9 = _multipleHits_T_33; // @[Misc.scala:178:18, :181:37] wire _multipleHits_T_34 = _multipleHits_T_32[1]; // @[Misc.scala:181:37, :182:39] wire multipleHits_rightOne_7 = _multipleHits_T_34; // @[Misc.scala:178:18, :182:39] wire multipleHits_leftOne_10 = multipleHits_leftOne_9 | multipleHits_rightOne_7; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_36 = multipleHits_leftOne_9 & multipleHits_rightOne_7; // @[Misc.scala:178:18, :183:61] wire multipleHits_leftTwo_3 = _multipleHits_T_36; // @[Misc.scala:183:{49,61}] wire [1:0] _multipleHits_T_37 = _multipleHits_T_31[3:2]; // @[Misc.scala:182:39] wire _multipleHits_T_38 = _multipleHits_T_37[0]; // @[Misc.scala:181:37, :182:39] wire multipleHits_leftOne_11 = _multipleHits_T_38; // @[Misc.scala:178:18, :181:37] wire _multipleHits_T_39 = _multipleHits_T_37[1]; // @[Misc.scala:182:39] wire multipleHits_rightOne_8 = _multipleHits_T_39; // @[Misc.scala:178:18, :182:39] wire multipleHits_rightOne_9 = multipleHits_leftOne_11 | multipleHits_rightOne_8; // @[Misc.scala:178:18, :183:16] wire _multipleHits_T_41 = multipleHits_leftOne_11 & multipleHits_rightOne_8; // @[Misc.scala:178:18, :183:61] wire multipleHits_rightTwo_4 = _multipleHits_T_41; // @[Misc.scala:183:{49,61}] wire multipleHits_rightOne_10 = multipleHits_leftOne_10 | multipleHits_rightOne_9; // @[Misc.scala:183:16] wire _multipleHits_T_42 = multipleHits_leftTwo_3 | multipleHits_rightTwo_4; // @[Misc.scala:183:{37,49}] wire _multipleHits_T_43 = multipleHits_leftOne_10 & multipleHits_rightOne_9; // @[Misc.scala:183:{16,61}] wire multipleHits_rightTwo_5 = _multipleHits_T_42 | _multipleHits_T_43; // @[Misc.scala:183:{37,49,61}] wire multipleHits_rightOne_11 = multipleHits_leftOne_8 | multipleHits_rightOne_10; // @[Misc.scala:183:16] wire _multipleHits_T_44 = multipleHits_leftTwo_2 | multipleHits_rightTwo_5; // @[Misc.scala:183:{37,49}] wire 
_multipleHits_T_45 = multipleHits_leftOne_8 & multipleHits_rightOne_10; // @[Misc.scala:183:{16,61}] wire multipleHits_rightTwo_6 = _multipleHits_T_44 | _multipleHits_T_45; // @[Misc.scala:183:{37,49,61}] wire _multipleHits_T_46 = multipleHits_leftOne_5 | multipleHits_rightOne_11; // @[Misc.scala:183:16] wire _multipleHits_T_47 = multipleHits_leftTwo_1 | multipleHits_rightTwo_6; // @[Misc.scala:183:{37,49}] wire _multipleHits_T_48 = multipleHits_leftOne_5 & multipleHits_rightOne_11; // @[Misc.scala:183:{16,61}] wire multipleHits = _multipleHits_T_47 | _multipleHits_T_48; // @[Misc.scala:183:{37,49,61}] assign _io_req_ready_T = state == 2'h0; // @[TLB.scala:352:22, :631:25] assign io_req_ready_0 = _io_req_ready_T; // @[TLB.scala:318:7, :631:25] wire _io_resp_pf_ld_T = bad_va & cmd_read; // @[TLB.scala:568:34, :633:28] wire [13:0] _io_resp_pf_ld_T_1 = pf_ld_array & hits; // @[TLB.scala:442:17, :597:24, :633:57] wire _io_resp_pf_ld_T_2 = |_io_resp_pf_ld_T_1; // @[TLB.scala:633:{57,65}] assign _io_resp_pf_ld_T_3 = _io_resp_pf_ld_T | _io_resp_pf_ld_T_2; // @[TLB.scala:633:{28,41,65}] assign io_resp_pf_ld_0 = _io_resp_pf_ld_T_3; // @[TLB.scala:318:7, :633:41] wire _io_resp_pf_st_T = bad_va & cmd_write_perms; // @[TLB.scala:568:34, :577:35, :634:28] wire [13:0] _io_resp_pf_st_T_1 = pf_st_array & hits; // @[TLB.scala:442:17, :598:24, :634:64] wire _io_resp_pf_st_T_2 = |_io_resp_pf_st_T_1; // @[TLB.scala:634:{64,72}] assign _io_resp_pf_st_T_3 = _io_resp_pf_st_T | _io_resp_pf_st_T_2; // @[TLB.scala:634:{28,48,72}] assign io_resp_pf_st_0 = _io_resp_pf_st_T_3; // @[TLB.scala:318:7, :634:48] wire [13:0] _io_resp_pf_inst_T = pf_inst_array & hits; // @[TLB.scala:442:17, :599:67, :635:47] wire _io_resp_pf_inst_T_1 = |_io_resp_pf_inst_T; // @[TLB.scala:635:{47,55}] assign _io_resp_pf_inst_T_2 = bad_va | _io_resp_pf_inst_T_1; // @[TLB.scala:568:34, :635:{29,55}] assign io_resp_pf_inst_0 = _io_resp_pf_inst_T_2; // @[TLB.scala:318:7, :635:29] wire [13:0] _io_resp_ae_ld_T = ae_ld_array & hits; // @[TLB.scala:442:17, :586:24, :641:33] assign _io_resp_ae_ld_T_1 = |_io_resp_ae_ld_T; // @[TLB.scala:641:{33,41}] assign io_resp_ae_ld_0 = _io_resp_ae_ld_T_1; // @[TLB.scala:318:7, :641:41] wire [13:0] _io_resp_ae_st_T = ae_st_array & hits; // @[TLB.scala:442:17, :590:53, :642:33] assign _io_resp_ae_st_T_1 = |_io_resp_ae_st_T; // @[TLB.scala:642:{33,41}] assign io_resp_ae_st_0 = _io_resp_ae_st_T_1; // @[TLB.scala:318:7, :642:41] wire [13:0] _io_resp_ae_inst_T = ~px_array; // @[TLB.scala:533:87, :643:23] wire [13:0] _io_resp_ae_inst_T_1 = _io_resp_ae_inst_T & hits; // @[TLB.scala:442:17, :643:{23,33}] assign _io_resp_ae_inst_T_2 = |_io_resp_ae_inst_T_1; // @[TLB.scala:643:{33,41}] assign io_resp_ae_inst_0 = _io_resp_ae_inst_T_2; // @[TLB.scala:318:7, :643:41] assign _io_resp_ma_ld_T = misaligned & cmd_read; // @[TLB.scala:550:77, :645:31] assign io_resp_ma_ld_0 = _io_resp_ma_ld_T; // @[TLB.scala:318:7, :645:31] assign _io_resp_ma_st_T = misaligned & cmd_write; // @[TLB.scala:550:77, :646:31] assign io_resp_ma_st_0 = _io_resp_ma_st_T; // @[TLB.scala:318:7, :646:31] wire [13:0] _io_resp_cacheable_T = c_array & hits; // @[TLB.scala:442:17, :537:20, :648:33] assign _io_resp_cacheable_T_1 = |_io_resp_cacheable_T; // @[TLB.scala:648:{33,41}] assign io_resp_cacheable_0 = _io_resp_cacheable_T_1; // @[TLB.scala:318:7, :648:41] wire [13:0] _io_resp_must_alloc_T = must_alloc_array & hits; // @[TLB.scala:442:17, :595:46, :649:43] assign _io_resp_must_alloc_T_1 = |_io_resp_must_alloc_T; // @[TLB.scala:649:{43,51}] assign 
io_resp_must_alloc_0 = _io_resp_must_alloc_T_1; // @[TLB.scala:318:7, :649:51] wire [13:0] _io_resp_prefetchable_T = prefetchable_array & hits; // @[TLB.scala:442:17, :547:31, :650:47] wire _io_resp_prefetchable_T_1 = |_io_resp_prefetchable_T; // @[TLB.scala:650:{47,55}] assign _io_resp_prefetchable_T_2 = _io_resp_prefetchable_T_1; // @[TLB.scala:650:{55,59}] assign io_resp_prefetchable_0 = _io_resp_prefetchable_T_2; // @[TLB.scala:318:7, :650:59] wire _io_resp_miss_T_1 = _io_resp_miss_T | tlb_miss; // @[TLB.scala:613:64, :651:{29,52}] assign _io_resp_miss_T_2 = _io_resp_miss_T_1 | multipleHits; // @[Misc.scala:183:49] assign io_resp_miss_0 = _io_resp_miss_T_2; // @[TLB.scala:318:7, :651:64] wire [38:0] _io_resp_paddr_T_1 = {ppn, _io_resp_paddr_T}; // @[Mux.scala:30:73] assign io_resp_paddr_0 = _io_resp_paddr_T_1[31:0]; // @[TLB.scala:318:7, :652:{17,23}] wire [36:0] _io_resp_gpa_page_T_1 = {1'h0, vpn}; // @[TLB.scala:335:30, :657:36] wire [36:0] io_resp_gpa_page = _io_resp_gpa_page_T_1; // @[TLB.scala:657:{19,36}] wire [35:0] _io_resp_gpa_page_T_2 = r_gpa[47:12]; // @[TLB.scala:363:18, :657:58] wire [11:0] _io_resp_gpa_offset_T = r_gpa[11:0]; // @[TLB.scala:363:18, :658:47] wire [11:0] io_resp_gpa_offset = _io_resp_gpa_offset_T_1; // @[TLB.scala:658:{21,82}] assign _io_resp_gpa_T = {io_resp_gpa_page, io_resp_gpa_offset}; // @[TLB.scala:657:19, :658:21, :659:8] assign io_resp_gpa_0 = _io_resp_gpa_T; // @[TLB.scala:318:7, :659:8] assign io_ptw_req_valid_0 = _io_ptw_req_valid_T; // @[TLB.scala:318:7, :662:29] wire r_superpage_repl_addr_left_subtree_older = state_reg_1[2]; // @[Replacement.scala:168:70, :243:38] wire _r_superpage_repl_addr_T = r_superpage_repl_addr_left_subtree_state; // @[package.scala:163:13] wire _r_superpage_repl_addr_T_1 = r_superpage_repl_addr_right_subtree_state; // @[Replacement.scala:245:38, :262:12] wire _r_superpage_repl_addr_T_2 = r_superpage_repl_addr_left_subtree_older ? _r_superpage_repl_addr_T : _r_superpage_repl_addr_T_1; // @[Replacement.scala:243:38, :250:16, :262:12] wire [1:0] _r_superpage_repl_addr_T_3 = {r_superpage_repl_addr_left_subtree_older, _r_superpage_repl_addr_T_2}; // @[Replacement.scala:243:38, :249:12, :250:16] wire [1:0] r_superpage_repl_addr_valids_lo = {superpage_entries_1_valid_0, superpage_entries_0_valid_0}; // @[package.scala:45:27] wire [1:0] r_superpage_repl_addr_valids_hi = {superpage_entries_3_valid_0, superpage_entries_2_valid_0}; // @[package.scala:45:27] wire [3:0] r_superpage_repl_addr_valids = {r_superpage_repl_addr_valids_hi, r_superpage_repl_addr_valids_lo}; // @[package.scala:45:27] wire _r_superpage_repl_addr_T_4 = &r_superpage_repl_addr_valids; // @[package.scala:45:27] wire [3:0] _r_superpage_repl_addr_T_5 = ~r_superpage_repl_addr_valids; // @[package.scala:45:27] wire _r_superpage_repl_addr_T_6 = _r_superpage_repl_addr_T_5[0]; // @[OneHot.scala:48:45] wire _r_superpage_repl_addr_T_7 = _r_superpage_repl_addr_T_5[1]; // @[OneHot.scala:48:45] wire _r_superpage_repl_addr_T_8 = _r_superpage_repl_addr_T_5[2]; // @[OneHot.scala:48:45] wire _r_superpage_repl_addr_T_9 = _r_superpage_repl_addr_T_5[3]; // @[OneHot.scala:48:45] wire [1:0] _r_superpage_repl_addr_T_10 = {1'h1, ~_r_superpage_repl_addr_T_8}; // @[OneHot.scala:48:45] wire [1:0] _r_superpage_repl_addr_T_11 = _r_superpage_repl_addr_T_7 ? 2'h1 : _r_superpage_repl_addr_T_10; // @[OneHot.scala:48:45] wire [1:0] _r_superpage_repl_addr_T_12 = _r_superpage_repl_addr_T_6 ? 
2'h0 : _r_superpage_repl_addr_T_11; // @[OneHot.scala:48:45] wire [1:0] _r_superpage_repl_addr_T_13 = _r_superpage_repl_addr_T_4 ? _r_superpage_repl_addr_T_3 : _r_superpage_repl_addr_T_12; // @[Mux.scala:50:70] wire r_sectored_repl_addr_left_subtree_older = state_vec_0[6]; // @[Replacement.scala:243:38, :305:17] wire r_sectored_repl_addr_left_subtree_older_1 = r_sectored_repl_addr_left_subtree_state[2]; // @[package.scala:163:13] wire r_sectored_repl_addr_left_subtree_state_1 = r_sectored_repl_addr_left_subtree_state[1]; // @[package.scala:163:13] wire _r_sectored_repl_addr_T = r_sectored_repl_addr_left_subtree_state_1; // @[package.scala:163:13] wire r_sectored_repl_addr_right_subtree_state_1 = r_sectored_repl_addr_left_subtree_state[0]; // @[package.scala:163:13] wire _r_sectored_repl_addr_T_1 = r_sectored_repl_addr_right_subtree_state_1; // @[Replacement.scala:245:38, :262:12] wire _r_sectored_repl_addr_T_2 = r_sectored_repl_addr_left_subtree_older_1 ? _r_sectored_repl_addr_T : _r_sectored_repl_addr_T_1; // @[Replacement.scala:243:38, :250:16, :262:12] wire [1:0] _r_sectored_repl_addr_T_3 = {r_sectored_repl_addr_left_subtree_older_1, _r_sectored_repl_addr_T_2}; // @[Replacement.scala:243:38, :249:12, :250:16] wire r_sectored_repl_addr_left_subtree_older_2 = r_sectored_repl_addr_right_subtree_state[2]; // @[Replacement.scala:243:38, :245:38] wire r_sectored_repl_addr_left_subtree_state_2 = r_sectored_repl_addr_right_subtree_state[1]; // @[package.scala:163:13] wire _r_sectored_repl_addr_T_4 = r_sectored_repl_addr_left_subtree_state_2; // @[package.scala:163:13] wire r_sectored_repl_addr_right_subtree_state_2 = r_sectored_repl_addr_right_subtree_state[0]; // @[Replacement.scala:245:38] wire _r_sectored_repl_addr_T_5 = r_sectored_repl_addr_right_subtree_state_2; // @[Replacement.scala:245:38, :262:12] wire _r_sectored_repl_addr_T_6 = r_sectored_repl_addr_left_subtree_older_2 ? _r_sectored_repl_addr_T_4 : _r_sectored_repl_addr_T_5; // @[Replacement.scala:243:38, :250:16, :262:12] wire [1:0] _r_sectored_repl_addr_T_7 = {r_sectored_repl_addr_left_subtree_older_2, _r_sectored_repl_addr_T_6}; // @[Replacement.scala:243:38, :249:12, :250:16] wire [1:0] _r_sectored_repl_addr_T_8 = r_sectored_repl_addr_left_subtree_older ? 
_r_sectored_repl_addr_T_3 : _r_sectored_repl_addr_T_7; // @[Replacement.scala:243:38, :249:12, :250:16] wire [2:0] _r_sectored_repl_addr_T_9 = {r_sectored_repl_addr_left_subtree_older, _r_sectored_repl_addr_T_8}; // @[Replacement.scala:243:38, :249:12, :250:16] wire _r_sectored_repl_addr_valids_T_1 = _r_sectored_repl_addr_valids_T | sectored_entries_0_0_valid_2; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_2 = _r_sectored_repl_addr_valids_T_1 | sectored_entries_0_0_valid_3; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_4 = _r_sectored_repl_addr_valids_T_3 | sectored_entries_0_1_valid_2; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_5 = _r_sectored_repl_addr_valids_T_4 | sectored_entries_0_1_valid_3; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_7 = _r_sectored_repl_addr_valids_T_6 | sectored_entries_0_2_valid_2; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_8 = _r_sectored_repl_addr_valids_T_7 | sectored_entries_0_2_valid_3; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_10 = _r_sectored_repl_addr_valids_T_9 | sectored_entries_0_3_valid_2; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_11 = _r_sectored_repl_addr_valids_T_10 | sectored_entries_0_3_valid_3; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_13 = _r_sectored_repl_addr_valids_T_12 | sectored_entries_0_4_valid_2; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_14 = _r_sectored_repl_addr_valids_T_13 | sectored_entries_0_4_valid_3; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_16 = _r_sectored_repl_addr_valids_T_15 | sectored_entries_0_5_valid_2; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_17 = _r_sectored_repl_addr_valids_T_16 | sectored_entries_0_5_valid_3; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_19 = _r_sectored_repl_addr_valids_T_18 | sectored_entries_0_6_valid_2; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_20 = _r_sectored_repl_addr_valids_T_19 | sectored_entries_0_6_valid_3; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_22 = _r_sectored_repl_addr_valids_T_21 | sectored_entries_0_7_valid_2; // @[package.scala:81:59] wire _r_sectored_repl_addr_valids_T_23 = _r_sectored_repl_addr_valids_T_22 | sectored_entries_0_7_valid_3; // @[package.scala:81:59] wire [1:0] r_sectored_repl_addr_valids_lo_lo = {_r_sectored_repl_addr_valids_T_5, _r_sectored_repl_addr_valids_T_2}; // @[package.scala:45:27, :81:59] wire [1:0] r_sectored_repl_addr_valids_lo_hi = {_r_sectored_repl_addr_valids_T_11, _r_sectored_repl_addr_valids_T_8}; // @[package.scala:45:27, :81:59] wire [3:0] r_sectored_repl_addr_valids_lo = {r_sectored_repl_addr_valids_lo_hi, r_sectored_repl_addr_valids_lo_lo}; // @[package.scala:45:27] wire [1:0] r_sectored_repl_addr_valids_hi_lo = {_r_sectored_repl_addr_valids_T_17, _r_sectored_repl_addr_valids_T_14}; // @[package.scala:45:27, :81:59] wire [1:0] r_sectored_repl_addr_valids_hi_hi = {_r_sectored_repl_addr_valids_T_23, _r_sectored_repl_addr_valids_T_20}; // @[package.scala:45:27, :81:59] wire [3:0] r_sectored_repl_addr_valids_hi = {r_sectored_repl_addr_valids_hi_hi, r_sectored_repl_addr_valids_hi_lo}; // @[package.scala:45:27] wire [7:0] r_sectored_repl_addr_valids = {r_sectored_repl_addr_valids_hi, r_sectored_repl_addr_valids_lo}; // @[package.scala:45:27] wire _r_sectored_repl_addr_T_10 = &r_sectored_repl_addr_valids; // @[package.scala:45:27] wire [7:0] _r_sectored_repl_addr_T_11 = 
~r_sectored_repl_addr_valids; // @[package.scala:45:27] wire _r_sectored_repl_addr_T_12 = _r_sectored_repl_addr_T_11[0]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_13 = _r_sectored_repl_addr_T_11[1]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_14 = _r_sectored_repl_addr_T_11[2]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_15 = _r_sectored_repl_addr_T_11[3]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_16 = _r_sectored_repl_addr_T_11[4]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_17 = _r_sectored_repl_addr_T_11[5]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_18 = _r_sectored_repl_addr_T_11[6]; // @[OneHot.scala:48:45] wire _r_sectored_repl_addr_T_19 = _r_sectored_repl_addr_T_11[7]; // @[OneHot.scala:48:45] wire [2:0] _r_sectored_repl_addr_T_20 = {2'h3, ~_r_sectored_repl_addr_T_18}; // @[OneHot.scala:48:45] wire [2:0] _r_sectored_repl_addr_T_21 = _r_sectored_repl_addr_T_17 ? 3'h5 : _r_sectored_repl_addr_T_20; // @[OneHot.scala:48:45] wire [2:0] _r_sectored_repl_addr_T_22 = _r_sectored_repl_addr_T_16 ? 3'h4 : _r_sectored_repl_addr_T_21; // @[OneHot.scala:48:45] wire [2:0] _r_sectored_repl_addr_T_23 = _r_sectored_repl_addr_T_15 ? 3'h3 : _r_sectored_repl_addr_T_22; // @[OneHot.scala:48:45] wire [2:0] _r_sectored_repl_addr_T_24 = _r_sectored_repl_addr_T_14 ? 3'h2 : _r_sectored_repl_addr_T_23; // @[OneHot.scala:48:45] wire [2:0] _r_sectored_repl_addr_T_25 = _r_sectored_repl_addr_T_13 ? 3'h1 : _r_sectored_repl_addr_T_24; // @[OneHot.scala:48:45] wire [2:0] _r_sectored_repl_addr_T_26 = _r_sectored_repl_addr_T_12 ? 3'h0 : _r_sectored_repl_addr_T_25; // @[OneHot.scala:48:45] wire [2:0] _r_sectored_repl_addr_T_27 = _r_sectored_repl_addr_T_10 ? _r_sectored_repl_addr_T_9 : _r_sectored_repl_addr_T_26; // @[Mux.scala:50:70] wire _r_sectored_hit_valid_T = sector_hits_0 | sector_hits_1; // @[package.scala:81:59] wire _r_sectored_hit_valid_T_1 = _r_sectored_hit_valid_T | sector_hits_2; // @[package.scala:81:59] wire _r_sectored_hit_valid_T_2 = _r_sectored_hit_valid_T_1 | sector_hits_3; // @[package.scala:81:59] wire _r_sectored_hit_valid_T_3 = _r_sectored_hit_valid_T_2 | sector_hits_4; // @[package.scala:81:59] wire _r_sectored_hit_valid_T_4 = _r_sectored_hit_valid_T_3 | sector_hits_5; // @[package.scala:81:59] wire _r_sectored_hit_valid_T_5 = _r_sectored_hit_valid_T_4 | sector_hits_6; // @[package.scala:81:59] wire _r_sectored_hit_valid_T_6 = _r_sectored_hit_valid_T_5 | sector_hits_7; // @[package.scala:81:59] wire [3:0] r_sectored_hit_bits_lo = {r_sectored_hit_bits_lo_hi, r_sectored_hit_bits_lo_lo}; // @[OneHot.scala:21:45] wire [3:0] r_sectored_hit_bits_hi = {r_sectored_hit_bits_hi_hi, r_sectored_hit_bits_hi_lo}; // @[OneHot.scala:21:45] wire [7:0] _r_sectored_hit_bits_T = {r_sectored_hit_bits_hi, r_sectored_hit_bits_lo}; // @[OneHot.scala:21:45] wire [3:0] r_sectored_hit_bits_hi_1 = _r_sectored_hit_bits_T[7:4]; // @[OneHot.scala:21:45, :30:18] wire [3:0] r_sectored_hit_bits_lo_1 = _r_sectored_hit_bits_T[3:0]; // @[OneHot.scala:21:45, :31:18] wire _r_sectored_hit_bits_T_1 = |r_sectored_hit_bits_hi_1; // @[OneHot.scala:30:18, :32:14] wire [3:0] _r_sectored_hit_bits_T_2 = r_sectored_hit_bits_hi_1 | r_sectored_hit_bits_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire [1:0] r_sectored_hit_bits_hi_2 = _r_sectored_hit_bits_T_2[3:2]; // @[OneHot.scala:30:18, :32:28] wire [1:0] r_sectored_hit_bits_lo_2 = _r_sectored_hit_bits_T_2[1:0]; // @[OneHot.scala:31:18, :32:28] wire _r_sectored_hit_bits_T_3 = |r_sectored_hit_bits_hi_2; // @[OneHot.scala:30:18, 
:32:14] wire [1:0] _r_sectored_hit_bits_T_4 = r_sectored_hit_bits_hi_2 | r_sectored_hit_bits_lo_2; // @[OneHot.scala:30:18, :31:18, :32:28] wire _r_sectored_hit_bits_T_5 = _r_sectored_hit_bits_T_4[1]; // @[OneHot.scala:32:28] wire [1:0] _r_sectored_hit_bits_T_6 = {_r_sectored_hit_bits_T_3, _r_sectored_hit_bits_T_5}; // @[OneHot.scala:32:{10,14}] wire [2:0] _r_sectored_hit_bits_T_7 = {_r_sectored_hit_bits_T_1, _r_sectored_hit_bits_T_6}; // @[OneHot.scala:32:{10,14}] wire _r_superpage_hit_valid_T = superpage_hits_0 | superpage_hits_1; // @[package.scala:81:59] wire _r_superpage_hit_valid_T_1 = _r_superpage_hit_valid_T | superpage_hits_2; // @[package.scala:81:59] wire _r_superpage_hit_valid_T_2 = _r_superpage_hit_valid_T_1 | superpage_hits_3; // @[package.scala:81:59] wire [3:0] _r_superpage_hit_bits_T = {r_superpage_hit_bits_hi, r_superpage_hit_bits_lo}; // @[OneHot.scala:21:45] wire [1:0] r_superpage_hit_bits_hi_1 = _r_superpage_hit_bits_T[3:2]; // @[OneHot.scala:21:45, :30:18] wire [1:0] r_superpage_hit_bits_lo_1 = _r_superpage_hit_bits_T[1:0]; // @[OneHot.scala:21:45, :31:18] wire _r_superpage_hit_bits_T_1 = |r_superpage_hit_bits_hi_1; // @[OneHot.scala:30:18, :32:14] wire [1:0] _r_superpage_hit_bits_T_2 = r_superpage_hit_bits_hi_1 | r_superpage_hit_bits_lo_1; // @[OneHot.scala:30:18, :31:18, :32:28] wire _r_superpage_hit_bits_T_3 = _r_superpage_hit_bits_T_2[1]; // @[OneHot.scala:32:28] wire [1:0] _r_superpage_hit_bits_T_4 = {_r_superpage_hit_bits_T_1, _r_superpage_hit_bits_T_3}; // @[OneHot.scala:32:{10,14}] wire [1:0] _state_T = {1'h1, io_sfence_valid_0}; // @[TLB.scala:318:7, :704:45]
Generate the Verilog code corresponding to the following Chisel files. File WidthWidget.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.AddressSet import freechips.rocketchip.util.{Repeater, UIntToOH1} // innBeatBytes => the new client-facing bus width class TLWidthWidget(innerBeatBytes: Int)(implicit p: Parameters) extends LazyModule { private def noChangeRequired(manager: TLManagerPortParameters) = manager.beatBytes == innerBeatBytes val node = new TLAdapterNode( clientFn = { case c => c }, managerFn = { case m => m.v1copy(beatBytes = innerBeatBytes) }){ override def circuitIdentity = edges.out.map(_.manager).forall(noChangeRequired) } override lazy val desiredName = s"TLWidthWidget$innerBeatBytes" lazy val module = new Impl class Impl extends LazyModuleImp(this) { def merge[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T]) = { val inBytes = edgeIn.manager.beatBytes val outBytes = edgeOut.manager.beatBytes val ratio = outBytes / inBytes val keepBits = log2Ceil(outBytes) val dropBits = log2Ceil(inBytes) val countBits = log2Ceil(ratio) val size = edgeIn.size(in.bits) val hasData = edgeIn.hasData(in.bits) val limit = UIntToOH1(size, keepBits) >> dropBits val count = RegInit(0.U(countBits.W)) val first = count === 0.U val last = count === limit || !hasData val enable = Seq.tabulate(ratio) { i => !((count ^ i.U) & limit).orR } val corrupt_reg = RegInit(false.B) val corrupt_in = edgeIn.corrupt(in.bits) val corrupt_out = corrupt_in || corrupt_reg when (in.fire) { count := count + 1.U corrupt_reg := corrupt_out when (last) { count := 0.U corrupt_reg := false.B } } def helper(idata: UInt): UInt = { // rdata is X until the first time a multi-beat write occurs. // Prevent the X from leaking outside by jamming the mux control until // the first time rdata is written (and hence no longer X). 
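// (Editorial illustration, not part of the original file.) For a narrow-to-wide merge with
// ratio = outBytes / inBytes = 4 and a full-width burst (limit = 3), enable(i) reduces to
// (count === i.U): input beats 0..2 are captured into rdata(0..2), beat 3 passes through
// combinationally, and Cat(mdata.reverse) presents input beat i on byte lanes
// [i*inBytes, (i+1)*inBytes) of the single wide output beat.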
val rdata_written_once = RegInit(false.B) val masked_enable = enable.map(_ || !rdata_written_once) val odata = Seq.fill(ratio) { WireInit(idata) } val rdata = Reg(Vec(ratio-1, chiselTypeOf(idata))) val pdata = rdata :+ idata val mdata = (masked_enable zip (odata zip pdata)) map { case (e, (o, p)) => Mux(e, o, p) } when (in.fire && !last) { rdata_written_once := true.B (rdata zip mdata) foreach { case (r, m) => r := m } } Cat(mdata.reverse) } in.ready := out.ready || !last out.valid := in.valid && last out.bits := in.bits // Don't put down hardware if we never carry data edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits))) edgeOut.corrupt(out.bits) := corrupt_out (out.bits, in.bits) match { case (o: TLBundleA, i: TLBundleA) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W)) case (o: TLBundleB, i: TLBundleB) => o.mask := edgeOut.mask(o.address, o.size) & Mux(hasData, helper(i.mask), ~0.U(outBytes.W)) case (o: TLBundleC, i: TLBundleC) => () case (o: TLBundleD, i: TLBundleD) => () case _ => require(false, "Impossible bundle combination in WidthWidget") } } def split[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = { val inBytes = edgeIn.manager.beatBytes val outBytes = edgeOut.manager.beatBytes val ratio = inBytes / outBytes val keepBits = log2Ceil(inBytes) val dropBits = log2Ceil(outBytes) val countBits = log2Ceil(ratio) val size = edgeIn.size(in.bits) val hasData = edgeIn.hasData(in.bits) val limit = UIntToOH1(size, keepBits) >> dropBits val count = RegInit(0.U(countBits.W)) val first = count === 0.U val last = count === limit || !hasData when (out.fire) { count := count + 1.U when (last) { count := 0.U } } // For sub-beat transfer, extract which part matters val sel = in.bits match { case a: TLBundleA => a.address(keepBits-1, dropBits) case b: TLBundleB => b.address(keepBits-1, dropBits) case c: TLBundleC => c.address(keepBits-1, dropBits) case d: TLBundleD => { val sel = sourceMap(d.source) val hold = Mux(first, sel, RegEnable(sel, first)) // a_first is not for whole xfer hold & ~limit // if more than one a_first/xfer, the address must be aligned anyway } } val index = sel | count def helper(idata: UInt, width: Int): UInt = { val mux = VecInit.tabulate(ratio) { i => idata((i+1)*outBytes*width-1, i*outBytes*width) } mux(index) } out.bits := in.bits out.valid := in.valid in.ready := out.ready // Don't put down hardware if we never carry data edgeOut.data(out.bits) := (if (edgeIn.staticHasData(in.bits) == Some(false)) 0.U else helper(edgeIn.data(in.bits), 8)) (out.bits, in.bits) match { case (o: TLBundleA, i: TLBundleA) => o.mask := helper(i.mask, 1) case (o: TLBundleB, i: TLBundleB) => o.mask := helper(i.mask, 1) case (o: TLBundleC, i: TLBundleC) => () // replicating corrupt to all beats is ok case (o: TLBundleD, i: TLBundleD) => () case _ => require(false, "Impossible bundle combination in WidthWidget") } // Repeat the input if we're not last !last } def splice[T <: TLDataChannel](edgeIn: TLEdge, in: DecoupledIO[T], edgeOut: TLEdge, out: DecoupledIO[T], sourceMap: UInt => UInt) = { if (edgeIn.manager.beatBytes == edgeOut.manager.beatBytes) { // nothing to do; pass it through out.bits := in.bits out.valid := in.valid in.ready := out.ready } else if (edgeIn.manager.beatBytes > edgeOut.manager.beatBytes) { // split input to output val repeat = Wire(Bool()) val repeated = Repeater(in, repeat) val cated =
Wire(chiselTypeOf(repeated)) cated <> repeated edgeIn.data(cated.bits) := Cat( edgeIn.data(repeated.bits)(edgeIn.manager.beatBytes*8-1, edgeOut.manager.beatBytes*8), edgeIn.data(in.bits)(edgeOut.manager.beatBytes*8-1, 0)) repeat := split(edgeIn, cated, edgeOut, out, sourceMap) } else { // merge input to output merge(edgeIn, in, edgeOut, out) } } (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => // If the master is narrower than the slave, the D channel must be narrowed. // This is tricky, because the D channel has no address data. // Thus, you don't know which part of a sub-beat transfer to extract. // To fix this, we record the relevant address bits for all sources. // The assumption is that this sort of situation happens only where // you connect a narrow master to the system bus, so there are few sources. def sourceMap(source_bits: UInt) = { val source = if (edgeIn.client.endSourceId == 1) 0.U(0.W) else source_bits require (edgeOut.manager.beatBytes > edgeIn.manager.beatBytes) val keepBits = log2Ceil(edgeOut.manager.beatBytes) val dropBits = log2Ceil(edgeIn.manager.beatBytes) val sources = Reg(Vec(edgeIn.client.endSourceId, UInt((keepBits-dropBits).W))) val a_sel = in.a.bits.address(keepBits-1, dropBits) when (in.a.fire) { if (edgeIn.client.endSourceId == 1) { // avoid extraction-index-width warning sources(0) := a_sel } else { sources(in.a.bits.source) := a_sel } } // depopulate unused source registers: edgeIn.client.unusedSources.foreach { id => sources(id) := 0.U } val bypass = in.a.valid && in.a.bits.source === source if (edgeIn.manager.minLatency > 0) sources(source) else Mux(bypass, a_sel, sources(source)) } splice(edgeIn, in.a, edgeOut, out.a, sourceMap) splice(edgeOut, out.d, edgeIn, in.d, sourceMap) if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) { splice(edgeOut, out.b, edgeIn, in.b, sourceMap) splice(edgeIn, in.c, edgeOut, out.c, sourceMap) out.e.valid := in.e.valid out.e.bits := in.e.bits in.e.ready := out.e.ready } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLWidthWidget { def apply(innerBeatBytes: Int)(implicit p: Parameters): TLNode = { val widget = LazyModule(new TLWidthWidget(innerBeatBytes)) widget.node } def apply(wrapper: TLBusWrapper)(implicit p: Parameters): TLNode = apply(wrapper.beatBytes) } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMWidthWidget(first: Int, second: Int, txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("WidthWidget")) val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff))) (ram.node := TLDelayer(0.1) := TLFragmenter(4, 256) := TLWidthWidget(second) := TLWidthWidget(first) := TLDelayer(0.1) := model.node := fuzz.node) lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMWidthWidgetTest(little: Int, big: Int, txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMWidthWidget(little,big,txns)).module) dut.io.start := DontCare io.finished := dut.io.finished } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import 
org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. 
val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. 
*/ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. */ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. 
It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. * * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with 
InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. */ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. 
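   * The offset is computed over the whole set of nodes reachable from this one through
   * `:*=*` bindings, so every node in that flex-connected component resolves to the
   * same cardinality direction.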
* * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
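      // Star resolution first counts, on each side of the node, how many bindings are
      // stars (size still unknown) and how many bindings already have a known size: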
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
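      // A single binding operator can expand to several ports once stars are resolved:
      // n.oPortMapping(i) is the half-open range [start, end) of edge indices on `n`
      // covered by this binding, and each index below becomes its own inward port.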
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
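   * Each outward edge is built by `outer.edgeO` from this node's negotiated
   * downward-flowing output parameters and the upward-flowing parameters reported by
   * the connected sink port.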
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
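   * Monitors are only created when this node is not a [[circuitIdentity]] and the
   * binding's view of [[Parameters]] has [[MonitorsEnabled]] set.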
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } }
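The `in`/`out` accessors above are the only supported way for module code to reach the negotiated bundles, and they may only be used once `instantiate()` has begun. A minimal sketch of that pattern follows; the node, class name, and import paths are illustrative assumptions (a recent rocket-chip TileLink client node), not part of the files being translated:

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class DemoClient(implicit p: Parameters) extends LazyModule {
  // Hypothetical client node; every diplomatic node exposes the same in/out API.
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1(name = "demo")))))
  lazy val module = new LazyModuleImp(this) {
    // Legal only inside LazyModuleImp: the bundle wires and edges exist once
    // the parent's instantiate() has run for this node.
    val (tl, edge) = node.out.head
    tl.a.valid := false.B      // drive the negotiated TileLink bundle directly
    tl.a.bits  := DontCare
    tl.d.ready := true.B
  }
}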
module TLInterconnectCoupler_cbus_to_sodor_tile( // @[LazyModuleImp.scala:138:7] input clock, // @[LazyModuleImp.scala:138:7] input reset, // @[LazyModuleImp.scala:138:7] input auto_tl_slave_clock_xing_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_slave_clock_xing_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_slave_clock_xing_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_slave_clock_xing_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_slave_clock_xing_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_tl_slave_clock_xing_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_slave_clock_xing_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [3:0] auto_tl_slave_clock_xing_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [31:0] auto_tl_slave_clock_xing_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_slave_clock_xing_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_slave_clock_xing_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_slave_clock_xing_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_slave_clock_xing_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_tl_slave_clock_xing_out_d_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_slave_clock_xing_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_tl_slave_clock_xing_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input auto_tl_slave_clock_xing_out_d_bits_sink, // @[LazyModuleImp.scala:107:25] input auto_tl_slave_clock_xing_out_d_bits_denied, // @[LazyModuleImp.scala:107:25] input [31:0] auto_tl_slave_clock_xing_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_slave_clock_xing_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_tl_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_tl_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [31:0] auto_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_tl_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_tl_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_tl_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_tl_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_tl_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_tl_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_tl_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_tl_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_tl_in_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire tlOut_d_valid; // @[MixedNode.scala:542:17] wire tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17] wire [63:0] tlOut_d_bits_data; // @[MixedNode.scala:542:17] wire tlOut_d_bits_denied; // @[MixedNode.scala:542:17] wire tlOut_d_bits_sink; // @[MixedNode.scala:542:17] wire [6:0] tlOut_d_bits_source; // 
@[MixedNode.scala:542:17] wire [2:0] tlOut_d_bits_size; // @[MixedNode.scala:542:17] wire [1:0] tlOut_d_bits_param; // @[MixedNode.scala:542:17] wire [2:0] tlOut_d_bits_opcode; // @[MixedNode.scala:542:17] wire tlOut_a_ready; // @[MixedNode.scala:542:17] wire auto_tl_slave_clock_xing_out_a_ready_0 = auto_tl_slave_clock_xing_out_a_ready; // @[LazyModuleImp.scala:138:7] wire auto_tl_slave_clock_xing_out_d_valid_0 = auto_tl_slave_clock_xing_out_d_valid; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_slave_clock_xing_out_d_bits_opcode_0 = auto_tl_slave_clock_xing_out_d_bits_opcode; // @[LazyModuleImp.scala:138:7] wire [1:0] auto_tl_slave_clock_xing_out_d_bits_param_0 = auto_tl_slave_clock_xing_out_d_bits_param; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_slave_clock_xing_out_d_bits_size_0 = auto_tl_slave_clock_xing_out_d_bits_size; // @[LazyModuleImp.scala:138:7] wire [6:0] auto_tl_slave_clock_xing_out_d_bits_source_0 = auto_tl_slave_clock_xing_out_d_bits_source; // @[LazyModuleImp.scala:138:7] wire auto_tl_slave_clock_xing_out_d_bits_sink_0 = auto_tl_slave_clock_xing_out_d_bits_sink; // @[LazyModuleImp.scala:138:7] wire auto_tl_slave_clock_xing_out_d_bits_denied_0 = auto_tl_slave_clock_xing_out_d_bits_denied; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_slave_clock_xing_out_d_bits_data_0 = auto_tl_slave_clock_xing_out_d_bits_data; // @[LazyModuleImp.scala:138:7] wire auto_tl_slave_clock_xing_out_d_bits_corrupt_0 = auto_tl_slave_clock_xing_out_d_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire auto_tl_in_a_valid_0 = auto_tl_in_a_valid; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_in_a_bits_opcode_0 = auto_tl_in_a_bits_opcode; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_in_a_bits_param_0 = auto_tl_in_a_bits_param; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_in_a_bits_size_0 = auto_tl_in_a_bits_size; // @[LazyModuleImp.scala:138:7] wire [6:0] auto_tl_in_a_bits_source_0 = auto_tl_in_a_bits_source; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_in_a_bits_address_0 = auto_tl_in_a_bits_address; // @[LazyModuleImp.scala:138:7] wire [7:0] auto_tl_in_a_bits_mask_0 = auto_tl_in_a_bits_mask; // @[LazyModuleImp.scala:138:7] wire [63:0] auto_tl_in_a_bits_data_0 = auto_tl_in_a_bits_data; // @[LazyModuleImp.scala:138:7] wire auto_tl_in_a_bits_corrupt_0 = auto_tl_in_a_bits_corrupt; // @[LazyModuleImp.scala:138:7] wire auto_tl_in_d_ready_0 = auto_tl_in_d_ready; // @[LazyModuleImp.scala:138:7] wire tlSlaveClockXingOut_a_ready = auto_tl_slave_clock_xing_out_a_ready_0; // @[MixedNode.scala:542:17] wire tlSlaveClockXingOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] tlSlaveClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlSlaveClockXingOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] tlSlaveClockXingOut_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] tlSlaveClockXingOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlSlaveClockXingOut_a_bits_address; // @[MixedNode.scala:542:17] wire [3:0] tlSlaveClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [31:0] tlSlaveClockXingOut_a_bits_data; // @[MixedNode.scala:542:17] wire tlSlaveClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire tlSlaveClockXingOut_d_ready; // @[MixedNode.scala:542:17] wire tlSlaveClockXingOut_d_valid = auto_tl_slave_clock_xing_out_d_valid_0; // @[MixedNode.scala:542:17] wire [2:0] tlSlaveClockXingOut_d_bits_opcode = auto_tl_slave_clock_xing_out_d_bits_opcode_0; // @[MixedNode.scala:542:17] wire [1:0] tlSlaveClockXingOut_d_bits_param = 
auto_tl_slave_clock_xing_out_d_bits_param_0; // @[MixedNode.scala:542:17] wire [2:0] tlSlaveClockXingOut_d_bits_size = auto_tl_slave_clock_xing_out_d_bits_size_0; // @[MixedNode.scala:542:17] wire [6:0] tlSlaveClockXingOut_d_bits_source = auto_tl_slave_clock_xing_out_d_bits_source_0; // @[MixedNode.scala:542:17] wire tlSlaveClockXingOut_d_bits_sink = auto_tl_slave_clock_xing_out_d_bits_sink_0; // @[MixedNode.scala:542:17] wire tlSlaveClockXingOut_d_bits_denied = auto_tl_slave_clock_xing_out_d_bits_denied_0; // @[MixedNode.scala:542:17] wire [31:0] tlSlaveClockXingOut_d_bits_data = auto_tl_slave_clock_xing_out_d_bits_data_0; // @[MixedNode.scala:542:17] wire tlIn_a_ready; // @[MixedNode.scala:551:17] wire tlSlaveClockXingOut_d_bits_corrupt = auto_tl_slave_clock_xing_out_d_bits_corrupt_0; // @[MixedNode.scala:542:17] wire tlIn_a_valid = auto_tl_in_a_valid_0; // @[MixedNode.scala:551:17] wire [2:0] tlIn_a_bits_opcode = auto_tl_in_a_bits_opcode_0; // @[MixedNode.scala:551:17] wire [2:0] tlIn_a_bits_param = auto_tl_in_a_bits_param_0; // @[MixedNode.scala:551:17] wire [2:0] tlIn_a_bits_size = auto_tl_in_a_bits_size_0; // @[MixedNode.scala:551:17] wire [6:0] tlIn_a_bits_source = auto_tl_in_a_bits_source_0; // @[MixedNode.scala:551:17] wire [31:0] tlIn_a_bits_address = auto_tl_in_a_bits_address_0; // @[MixedNode.scala:551:17] wire [7:0] tlIn_a_bits_mask = auto_tl_in_a_bits_mask_0; // @[MixedNode.scala:551:17] wire [63:0] tlIn_a_bits_data = auto_tl_in_a_bits_data_0; // @[MixedNode.scala:551:17] wire tlIn_a_bits_corrupt = auto_tl_in_a_bits_corrupt_0; // @[MixedNode.scala:551:17] wire tlIn_d_ready = auto_tl_in_d_ready_0; // @[MixedNode.scala:551:17] wire tlIn_d_valid; // @[MixedNode.scala:551:17] wire [2:0] tlIn_d_bits_opcode; // @[MixedNode.scala:551:17] wire [1:0] tlIn_d_bits_param; // @[MixedNode.scala:551:17] wire [2:0] tlIn_d_bits_size; // @[MixedNode.scala:551:17] wire [6:0] tlIn_d_bits_source; // @[MixedNode.scala:551:17] wire tlIn_d_bits_sink; // @[MixedNode.scala:551:17] wire tlIn_d_bits_denied; // @[MixedNode.scala:551:17] wire [63:0] tlIn_d_bits_data; // @[MixedNode.scala:551:17] wire tlIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire [2:0] auto_tl_slave_clock_xing_out_a_bits_opcode_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_slave_clock_xing_out_a_bits_param_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_slave_clock_xing_out_a_bits_size_0; // @[LazyModuleImp.scala:138:7] wire [6:0] auto_tl_slave_clock_xing_out_a_bits_source_0; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_slave_clock_xing_out_a_bits_address_0; // @[LazyModuleImp.scala:138:7] wire [3:0] auto_tl_slave_clock_xing_out_a_bits_mask_0; // @[LazyModuleImp.scala:138:7] wire [31:0] auto_tl_slave_clock_xing_out_a_bits_data_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_slave_clock_xing_out_a_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_slave_clock_xing_out_a_valid_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_slave_clock_xing_out_d_ready_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_in_a_ready_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_in_d_bits_opcode_0; // @[LazyModuleImp.scala:138:7] wire [1:0] auto_tl_in_d_bits_param_0; // @[LazyModuleImp.scala:138:7] wire [2:0] auto_tl_in_d_bits_size_0; // @[LazyModuleImp.scala:138:7] wire [6:0] auto_tl_in_d_bits_source_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_in_d_bits_sink_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_in_d_bits_denied_0; // @[LazyModuleImp.scala:138:7] wire [63:0] auto_tl_in_d_bits_data_0; // 
@[LazyModuleImp.scala:138:7] wire auto_tl_in_d_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] wire auto_tl_in_d_valid_0; // @[LazyModuleImp.scala:138:7] assign tlIn_a_ready = tlOut_a_ready; // @[MixedNode.scala:542:17, :551:17] assign tlIn_d_valid = tlOut_d_valid; // @[MixedNode.scala:542:17, :551:17] assign tlIn_d_bits_opcode = tlOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlIn_d_bits_param = tlOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlIn_d_bits_size = tlOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlIn_d_bits_source = tlOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlIn_d_bits_sink = tlOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] assign tlIn_d_bits_denied = tlOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] assign tlIn_d_bits_data = tlOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] tlOut_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] tlOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlOut_a_bits_address; // @[MixedNode.scala:542:17] wire [7:0] tlOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [63:0] tlOut_a_bits_data; // @[MixedNode.scala:542:17] wire tlOut_a_bits_corrupt; // @[MixedNode.scala:542:17] assign tlIn_d_bits_corrupt = tlOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire tlOut_a_valid; // @[MixedNode.scala:542:17] wire tlOut_d_ready; // @[MixedNode.scala:542:17] assign auto_tl_in_a_ready_0 = tlIn_a_ready; // @[MixedNode.scala:551:17] assign tlOut_a_valid = tlIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_opcode = tlIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_param = tlIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_size = tlIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_source = tlIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_address = tlIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_mask = tlIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_data = tlIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign tlOut_a_bits_corrupt = tlIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign tlOut_d_ready = tlIn_d_ready; // @[MixedNode.scala:542:17, :551:17] assign auto_tl_in_d_valid_0 = tlIn_d_valid; // @[MixedNode.scala:551:17] assign auto_tl_in_d_bits_opcode_0 = tlIn_d_bits_opcode; // @[MixedNode.scala:551:17] assign auto_tl_in_d_bits_param_0 = tlIn_d_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_in_d_bits_size_0 = tlIn_d_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_in_d_bits_source_0 = tlIn_d_bits_source; // @[MixedNode.scala:551:17] assign auto_tl_in_d_bits_sink_0 = tlIn_d_bits_sink; // @[MixedNode.scala:551:17] assign auto_tl_in_d_bits_denied_0 = tlIn_d_bits_denied; // @[MixedNode.scala:551:17] assign auto_tl_in_d_bits_data_0 = tlIn_d_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_in_d_bits_corrupt_0 = tlIn_d_bits_corrupt; // @[MixedNode.scala:551:17] wire tlSlaveClockXingIn_a_ready = tlSlaveClockXingOut_a_ready; // @[MixedNode.scala:542:17, :551:17] wire tlSlaveClockXingIn_a_valid; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_valid_0 = tlSlaveClockXingOut_a_valid; // @[MixedNode.scala:542:17] wire [2:0] tlSlaveClockXingIn_a_bits_opcode; // 
@[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_bits_opcode_0 = tlSlaveClockXingOut_a_bits_opcode; // @[MixedNode.scala:542:17] wire [2:0] tlSlaveClockXingIn_a_bits_param; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_bits_param_0 = tlSlaveClockXingOut_a_bits_param; // @[MixedNode.scala:542:17] wire [2:0] tlSlaveClockXingIn_a_bits_size; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_bits_size_0 = tlSlaveClockXingOut_a_bits_size; // @[MixedNode.scala:542:17] wire [6:0] tlSlaveClockXingIn_a_bits_source; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_bits_source_0 = tlSlaveClockXingOut_a_bits_source; // @[MixedNode.scala:542:17] wire [31:0] tlSlaveClockXingIn_a_bits_address; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_bits_address_0 = tlSlaveClockXingOut_a_bits_address; // @[MixedNode.scala:542:17] wire [3:0] tlSlaveClockXingIn_a_bits_mask; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_bits_mask_0 = tlSlaveClockXingOut_a_bits_mask; // @[MixedNode.scala:542:17] wire [31:0] tlSlaveClockXingIn_a_bits_data; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_bits_data_0 = tlSlaveClockXingOut_a_bits_data; // @[MixedNode.scala:542:17] wire tlSlaveClockXingIn_a_bits_corrupt; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_a_bits_corrupt_0 = tlSlaveClockXingOut_a_bits_corrupt; // @[MixedNode.scala:542:17] wire tlSlaveClockXingIn_d_ready; // @[MixedNode.scala:551:17] assign auto_tl_slave_clock_xing_out_d_ready_0 = tlSlaveClockXingOut_d_ready; // @[MixedNode.scala:542:17] wire tlSlaveClockXingIn_d_valid = tlSlaveClockXingOut_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlSlaveClockXingIn_d_bits_opcode = tlSlaveClockXingOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] tlSlaveClockXingIn_d_bits_param = tlSlaveClockXingOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [2:0] tlSlaveClockXingIn_d_bits_size = tlSlaveClockXingOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [6:0] tlSlaveClockXingIn_d_bits_source = tlSlaveClockXingOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire tlSlaveClockXingIn_d_bits_sink = tlSlaveClockXingOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire tlSlaveClockXingIn_d_bits_denied = tlSlaveClockXingOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [31:0] tlSlaveClockXingIn_d_bits_data = tlSlaveClockXingOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire tlSlaveClockXingIn_d_bits_corrupt = tlSlaveClockXingOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_a_ready = tlSlaveClockXingIn_a_ready; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_a_valid; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_a_valid = tlSlaveClockXingIn_a_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferOut_a_bits_opcode; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_a_bits_opcode = tlSlaveClockXingIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferOut_a_bits_param; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_a_bits_param = tlSlaveClockXingIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferOut_a_bits_size; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_a_bits_size = tlSlaveClockXingIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [6:0] no_bufferOut_a_bits_source; // @[MixedNode.scala:542:17] assign 
tlSlaveClockXingOut_a_bits_source = tlSlaveClockXingIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [31:0] no_bufferOut_a_bits_address; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_a_bits_address = tlSlaveClockXingIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] wire [3:0] no_bufferOut_a_bits_mask; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_a_bits_mask = tlSlaveClockXingIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] wire [31:0] no_bufferOut_a_bits_data; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_a_bits_data = tlSlaveClockXingIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_a_bits_corrupt; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_a_bits_corrupt = tlSlaveClockXingIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_d_ready; // @[MixedNode.scala:542:17] assign tlSlaveClockXingOut_d_ready = tlSlaveClockXingIn_d_ready; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_d_valid = tlSlaveClockXingIn_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferOut_d_bits_opcode = tlSlaveClockXingIn_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] no_bufferOut_d_bits_param = tlSlaveClockXingIn_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferOut_d_bits_size = tlSlaveClockXingIn_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [6:0] no_bufferOut_d_bits_source = tlSlaveClockXingIn_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_d_bits_sink = tlSlaveClockXingIn_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_d_bits_denied = tlSlaveClockXingIn_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [31:0] no_bufferOut_d_bits_data = tlSlaveClockXingIn_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire no_bufferOut_d_bits_corrupt = tlSlaveClockXingIn_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_a_ready = no_bufferOut_a_ready; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_a_valid; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_valid = no_bufferOut_a_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_a_bits_opcode; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_bits_opcode = no_bufferOut_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_a_bits_param; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_bits_param = no_bufferOut_a_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_a_bits_size; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_bits_size = no_bufferOut_a_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [6:0] no_bufferIn_a_bits_source; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_bits_source = no_bufferOut_a_bits_source; // @[MixedNode.scala:542:17, :551:17] wire [31:0] no_bufferIn_a_bits_address; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_bits_address = no_bufferOut_a_bits_address; // @[MixedNode.scala:542:17, :551:17] wire [3:0] no_bufferIn_a_bits_mask; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_bits_mask = no_bufferOut_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] wire [31:0] no_bufferIn_a_bits_data; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_bits_data = no_bufferOut_a_bits_data; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_a_bits_corrupt; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_a_bits_corrupt = no_bufferOut_a_bits_corrupt; // 
@[MixedNode.scala:542:17, :551:17] wire no_bufferIn_d_ready; // @[MixedNode.scala:551:17] assign tlSlaveClockXingIn_d_ready = no_bufferOut_d_ready; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_d_valid = no_bufferOut_d_valid; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_d_bits_opcode = no_bufferOut_d_bits_opcode; // @[MixedNode.scala:542:17, :551:17] wire [1:0] no_bufferIn_d_bits_param = no_bufferOut_d_bits_param; // @[MixedNode.scala:542:17, :551:17] wire [2:0] no_bufferIn_d_bits_size = no_bufferOut_d_bits_size; // @[MixedNode.scala:542:17, :551:17] wire [6:0] no_bufferIn_d_bits_source = no_bufferOut_d_bits_source; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_d_bits_sink = no_bufferOut_d_bits_sink; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_d_bits_denied = no_bufferOut_d_bits_denied; // @[MixedNode.scala:542:17, :551:17] wire [31:0] no_bufferIn_d_bits_data = no_bufferOut_d_bits_data; // @[MixedNode.scala:542:17, :551:17] wire no_bufferIn_d_bits_corrupt = no_bufferOut_d_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_valid = no_bufferIn_a_valid; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_bits_opcode = no_bufferIn_a_bits_opcode; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_bits_param = no_bufferIn_a_bits_param; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_bits_size = no_bufferIn_a_bits_size; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_bits_source = no_bufferIn_a_bits_source; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_bits_address = no_bufferIn_a_bits_address; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_bits_mask = no_bufferIn_a_bits_mask; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_bits_data = no_bufferIn_a_bits_data; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_a_bits_corrupt = no_bufferIn_a_bits_corrupt; // @[MixedNode.scala:542:17, :551:17] assign no_bufferOut_d_ready = no_bufferIn_d_ready; // @[MixedNode.scala:542:17, :551:17] TLWidthWidget8_2 widget ( // @[WidthWidget.scala:230:28] .clock (clock), .reset (reset), .auto_anon_in_a_ready (tlOut_a_ready), .auto_anon_in_a_valid (tlOut_a_valid), // @[MixedNode.scala:542:17] .auto_anon_in_a_bits_opcode (tlOut_a_bits_opcode), // @[MixedNode.scala:542:17] .auto_anon_in_a_bits_param (tlOut_a_bits_param), // @[MixedNode.scala:542:17] .auto_anon_in_a_bits_size (tlOut_a_bits_size), // @[MixedNode.scala:542:17] .auto_anon_in_a_bits_source (tlOut_a_bits_source), // @[MixedNode.scala:542:17] .auto_anon_in_a_bits_address (tlOut_a_bits_address), // @[MixedNode.scala:542:17] .auto_anon_in_a_bits_mask (tlOut_a_bits_mask), // @[MixedNode.scala:542:17] .auto_anon_in_a_bits_data (tlOut_a_bits_data), // @[MixedNode.scala:542:17] .auto_anon_in_a_bits_corrupt (tlOut_a_bits_corrupt), // @[MixedNode.scala:542:17] .auto_anon_in_d_ready (tlOut_d_ready), // @[MixedNode.scala:542:17] .auto_anon_in_d_valid (tlOut_d_valid), .auto_anon_in_d_bits_opcode (tlOut_d_bits_opcode), .auto_anon_in_d_bits_param (tlOut_d_bits_param), .auto_anon_in_d_bits_size (tlOut_d_bits_size), .auto_anon_in_d_bits_source (tlOut_d_bits_source), .auto_anon_in_d_bits_sink (tlOut_d_bits_sink), .auto_anon_in_d_bits_denied (tlOut_d_bits_denied), .auto_anon_in_d_bits_data (tlOut_d_bits_data), .auto_anon_in_d_bits_corrupt (tlOut_d_bits_corrupt), .auto_anon_out_a_ready (no_bufferIn_a_ready), // @[MixedNode.scala:551:17] .auto_anon_out_a_valid (no_bufferIn_a_valid), .auto_anon_out_a_bits_opcode 
(no_bufferIn_a_bits_opcode), .auto_anon_out_a_bits_param (no_bufferIn_a_bits_param), .auto_anon_out_a_bits_size (no_bufferIn_a_bits_size), .auto_anon_out_a_bits_source (no_bufferIn_a_bits_source), .auto_anon_out_a_bits_address (no_bufferIn_a_bits_address), .auto_anon_out_a_bits_mask (no_bufferIn_a_bits_mask), .auto_anon_out_a_bits_data (no_bufferIn_a_bits_data), .auto_anon_out_a_bits_corrupt (no_bufferIn_a_bits_corrupt), .auto_anon_out_d_ready (no_bufferIn_d_ready), .auto_anon_out_d_valid (no_bufferIn_d_valid), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_opcode (no_bufferIn_d_bits_opcode), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_param (no_bufferIn_d_bits_param), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_size (no_bufferIn_d_bits_size), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_source (no_bufferIn_d_bits_source), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_sink (no_bufferIn_d_bits_sink), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_denied (no_bufferIn_d_bits_denied), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_data (no_bufferIn_d_bits_data), // @[MixedNode.scala:551:17] .auto_anon_out_d_bits_corrupt (no_bufferIn_d_bits_corrupt) // @[MixedNode.scala:551:17] ); // @[WidthWidget.scala:230:28] assign auto_tl_slave_clock_xing_out_a_valid = auto_tl_slave_clock_xing_out_a_valid_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_a_bits_opcode = auto_tl_slave_clock_xing_out_a_bits_opcode_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_a_bits_param = auto_tl_slave_clock_xing_out_a_bits_param_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_a_bits_size = auto_tl_slave_clock_xing_out_a_bits_size_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_a_bits_source = auto_tl_slave_clock_xing_out_a_bits_source_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_a_bits_address = auto_tl_slave_clock_xing_out_a_bits_address_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_a_bits_mask = auto_tl_slave_clock_xing_out_a_bits_mask_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_a_bits_data = auto_tl_slave_clock_xing_out_a_bits_data_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_a_bits_corrupt = auto_tl_slave_clock_xing_out_a_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_slave_clock_xing_out_d_ready = auto_tl_slave_clock_xing_out_d_ready_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_a_ready = auto_tl_in_a_ready_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_valid = auto_tl_in_d_valid_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_bits_opcode = auto_tl_in_d_bits_opcode_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_bits_param = auto_tl_in_d_bits_param_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_bits_size = auto_tl_in_d_bits_size_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_bits_source = auto_tl_in_d_bits_source_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_bits_sink = auto_tl_in_d_bits_sink_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_bits_denied = auto_tl_in_d_bits_denied_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_bits_data = auto_tl_in_d_bits_data_0; // @[LazyModuleImp.scala:138:7] assign auto_tl_in_d_bits_corrupt = auto_tl_in_d_bits_corrupt_0; // @[LazyModuleImp.scala:138:7] endmodule
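The generated coupler above holds no state of its own: it instantiates a TLWidthWidget to narrow the 64-bit inward TileLink port down to the 32-bit crossing port, and the no_buffer* wires are pure pass-throughs. A hypothetical diplomacy-level sketch of that topology (invented names; the real coupler is assembled by rocket-chip's bus and crossing helpers rather than written by hand):

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class DemoCoupler(implicit p: Parameters) extends LazyModule {
  val tlIn    = TLIdentityNode() // inward-facing port, 8-byte (64-bit) bus side
  val xingOut = TLIdentityNode() // port that crosses into the tile's clock domain
  // Narrow from 8 beat bytes down to what the tile-side manager negotiates (4 bytes
  // here), with an explicitly empty buffer stage, mirroring the no_buffer wires above.
  xingOut := TLBuffer(BufferParams.none) := TLWidthWidget(8) := tlIn
  lazy val module = new LazyModuleImp(this)
}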
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
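  // In the output-stationary dataflow c1 and c2 double-buffer: the register selected by
  // `propagate` is drained through out_c (with the rounding shift applied only while
  // `flip` is high, i.e. on the first valid cycle of a new propagate phase) and refilled
  // from in_d, while the other register keeps accumulating new MACs.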
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
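        // Worked example of the rounding increment computed below: for self = 6 (0b110)
        // and u = 2, point_five = bit 1 = 1, zeros = 0 (no set bits below bit 1), and
        // ones_digit = bit 2 = 1, so r = 1 and the result is (6 >> 2) + 1 = 2. For
        // self = 2 (0b010) and u = 2, zeros and ones_digit are both 0, so the exact-half
        // remainder is dropped and the result is 0.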
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
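The rounding right-shift defined above for UInt and SInt (the point_five & (zeros | ones_digit) increment) implements the vxrm-style round-to-nearest rule referenced in the comments. Below is a minimal plain-Scala sketch of that rule, for illustration only; the object name RoundingShiftModel is hypothetical and nothing here is part of the generated hardware.

// Software model of the rounding shift in SIntArithmetic.>> (illustrative sketch).
object RoundingShiftModel {
  def roundingShift(x: Int, u: Int): Int = {
    if (u == 0) x
    else {
      val pointFive = (x >> (u - 1)) & 1                                     // bit just below the cut point
      val zeros     = if (u > 1 && (x & ((1 << (u - 1)) - 1)) != 0) 1 else 0 // any lower bits set?
      val onesDigit = (x >> u) & 1                                           // LSB of the truncated result
      val r         = pointFive & (zeros | onesDigit)                        // rounding increment
      (x >> u) + r                                                           // arithmetic shift plus rounding
    }
  }
  def main(args: Array[String]): Unit = {
    println(roundingShift(-13, 2)) // -3: -13/4 = -3.25 rounds to the nearest integer
    println(roundingShift(6, 2))   //  2: 1.5 is a tie and rounds to the even neighbour
    println(roundingShift(2, 2))   //  0: 0.5 is a tie and rounds to the even neighbour
  }
}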
module PE_342( // @[PE.scala:31:7] input clock, // @[PE.scala:31:7] input reset, // @[PE.scala:31:7] input [7:0] io_in_a, // @[PE.scala:35:14] input [19:0] io_in_b, // @[PE.scala:35:14] input [19:0] io_in_d, // @[PE.scala:35:14] output [7:0] io_out_a, // @[PE.scala:35:14] output [19:0] io_out_b, // @[PE.scala:35:14] output [19:0] io_out_c, // @[PE.scala:35:14] input io_in_control_dataflow, // @[PE.scala:35:14] input io_in_control_propagate, // @[PE.scala:35:14] input [4:0] io_in_control_shift, // @[PE.scala:35:14] output io_out_control_dataflow, // @[PE.scala:35:14] output io_out_control_propagate, // @[PE.scala:35:14] output [4:0] io_out_control_shift, // @[PE.scala:35:14] input [2:0] io_in_id, // @[PE.scala:35:14] output [2:0] io_out_id, // @[PE.scala:35:14] input io_in_last, // @[PE.scala:35:14] output io_out_last, // @[PE.scala:35:14] input io_in_valid, // @[PE.scala:35:14] output io_out_valid // @[PE.scala:35:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:31:7] wire [19:0] io_in_b_0 = io_in_b; // @[PE.scala:31:7] wire [19:0] io_in_d_0 = io_in_d; // @[PE.scala:31:7] wire io_in_control_dataflow_0 = io_in_control_dataflow; // @[PE.scala:31:7] wire io_in_control_propagate_0 = io_in_control_propagate; // @[PE.scala:31:7] wire [4:0] io_in_control_shift_0 = io_in_control_shift; // @[PE.scala:31:7] wire [2:0] io_in_id_0 = io_in_id; // @[PE.scala:31:7] wire io_in_last_0 = io_in_last; // @[PE.scala:31:7] wire io_in_valid_0 = io_in_valid; // @[PE.scala:31:7] wire io_bad_dataflow = 1'h0; // @[PE.scala:31:7] wire _io_out_c_T_5 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_6 = 1'h0; // @[Arithmetic.scala:125:60] wire _io_out_c_T_16 = 1'h0; // @[Arithmetic.scala:125:33] wire _io_out_c_T_17 = 1'h0; // @[Arithmetic.scala:125:60] wire [7:0] io_out_a_0 = io_in_a_0; // @[PE.scala:31:7] wire [19:0] _mac_unit_io_in_b_T = io_in_b_0; // @[PE.scala:31:7, :106:37] wire [19:0] _mac_unit_io_in_b_T_2 = io_in_b_0; // @[PE.scala:31:7, :113:37] wire [19:0] _mac_unit_io_in_b_T_8 = io_in_b_0; // @[PE.scala:31:7, :137:35] wire io_out_control_dataflow_0 = io_in_control_dataflow_0; // @[PE.scala:31:7] wire io_out_control_propagate_0 = io_in_control_propagate_0; // @[PE.scala:31:7] wire [4:0] io_out_control_shift_0 = io_in_control_shift_0; // @[PE.scala:31:7] wire [2:0] io_out_id_0 = io_in_id_0; // @[PE.scala:31:7] wire io_out_last_0 = io_in_last_0; // @[PE.scala:31:7] wire io_out_valid_0 = io_in_valid_0; // @[PE.scala:31:7] wire [19:0] io_out_b_0; // @[PE.scala:31:7] wire [19:0] io_out_c_0; // @[PE.scala:31:7] reg [7:0] c1; // @[PE.scala:70:15] wire [7:0] _io_out_c_zeros_T_1 = c1; // @[PE.scala:70:15] wire [7:0] _mac_unit_io_in_b_T_6 = c1; // @[PE.scala:70:15, :127:38] reg [7:0] c2; // @[PE.scala:71:15] wire [7:0] _io_out_c_zeros_T_10 = c2; // @[PE.scala:71:15] wire [7:0] _mac_unit_io_in_b_T_4 = c2; // @[PE.scala:71:15, :121:38] reg last_s; // @[PE.scala:89:25] wire flip = last_s != io_in_control_propagate_0; // @[PE.scala:31:7, :89:25, :90:21] wire [4:0] shift_offset = flip ? 
io_in_control_shift_0 : 5'h0; // @[PE.scala:31:7, :90:21, :91:25] wire _GEN = shift_offset == 5'h0; // @[PE.scala:91:25] wire _io_out_c_point_five_T; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T = _GEN; // @[Arithmetic.scala:101:32] wire _io_out_c_point_five_T_5; // @[Arithmetic.scala:101:32] assign _io_out_c_point_five_T_5 = _GEN; // @[Arithmetic.scala:101:32] wire [5:0] _GEN_0 = {1'h0, shift_offset} - 6'h1; // @[PE.scala:91:25] wire [5:0] _io_out_c_point_five_T_1; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_1 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_2; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_2 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [5:0] _io_out_c_point_five_T_6; // @[Arithmetic.scala:101:53] assign _io_out_c_point_five_T_6 = _GEN_0; // @[Arithmetic.scala:101:53] wire [5:0] _io_out_c_zeros_T_11; // @[Arithmetic.scala:102:66] assign _io_out_c_zeros_T_11 = _GEN_0; // @[Arithmetic.scala:101:53, :102:66] wire [4:0] _io_out_c_point_five_T_2 = _io_out_c_point_five_T_1[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_3 = $signed($signed(c1) >>> _io_out_c_point_five_T_2); // @[PE.scala:70:15] wire _io_out_c_point_five_T_4 = _io_out_c_point_five_T_3[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five = ~_io_out_c_point_five_T & _io_out_c_point_five_T_4; // @[Arithmetic.scala:101:{29,32,50}] wire _GEN_1 = shift_offset < 5'h2; // @[PE.scala:91:25] wire _io_out_c_zeros_T; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T = _GEN_1; // @[Arithmetic.scala:102:27] wire _io_out_c_zeros_T_9; // @[Arithmetic.scala:102:27] assign _io_out_c_zeros_T_9 = _GEN_1; // @[Arithmetic.scala:102:27] wire [4:0] _io_out_c_zeros_T_3 = _io_out_c_zeros_T_2[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_4 = 32'h1 << _io_out_c_zeros_T_3; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_5 = {1'h0, _io_out_c_zeros_T_4} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_6 = _io_out_c_zeros_T_5[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_7 = {24'h0, _io_out_c_zeros_T_6[7:0] & _io_out_c_zeros_T_1}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_8 = _io_out_c_zeros_T ? 
32'h0 : _io_out_c_zeros_T_7; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros = |_io_out_c_zeros_T_8; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_2 = {3'h0, shift_offset}; // @[PE.scala:91:25] wire [7:0] _GEN_3 = $signed($signed(c1) >>> _GEN_2); // @[PE.scala:70:15] wire [7:0] _io_out_c_ones_digit_T; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T = _GEN_3; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T; // @[Arithmetic.scala:107:15] assign _io_out_c_T = _GEN_3; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit = _io_out_c_ones_digit_T[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T = io_out_c_zeros | io_out_c_ones_digit; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_1 = io_out_c_point_five & _io_out_c_r_T; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r = _io_out_c_r_T_1; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_1 = {1'h0, io_out_c_r}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_2 = {_io_out_c_T[7], _io_out_c_T} + {{7{_io_out_c_T_1[1]}}, _io_out_c_T_1}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_3 = _io_out_c_T_2[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_4 = _io_out_c_T_3; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_7 = {{12{_io_out_c_T_4[7]}}, _io_out_c_T_4}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_8 = _io_out_c_T_7; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_9 = _io_out_c_T_8; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_10 = _io_out_c_T_9; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_1 = _mac_unit_io_in_b_T; // @[PE.scala:106:37] wire [7:0] _mac_unit_io_in_b_WIRE = _mac_unit_io_in_b_T_1[7:0]; // @[PE.scala:106:37] wire [7:0] _c1_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c2_T = io_in_d_0[7:0]; // @[PE.scala:31:7] wire [7:0] _c1_T_1 = _c1_T; // @[Arithmetic.scala:114:{15,33}] wire [4:0] _io_out_c_point_five_T_7 = _io_out_c_point_five_T_6[4:0]; // @[Arithmetic.scala:101:53] wire [7:0] _io_out_c_point_five_T_8 = $signed($signed(c2) >>> _io_out_c_point_five_T_7); // @[PE.scala:71:15] wire _io_out_c_point_five_T_9 = _io_out_c_point_five_T_8[0]; // @[Arithmetic.scala:101:50] wire io_out_c_point_five_1 = ~_io_out_c_point_five_T_5 & _io_out_c_point_five_T_9; // @[Arithmetic.scala:101:{29,32,50}] wire [4:0] _io_out_c_zeros_T_12 = _io_out_c_zeros_T_11[4:0]; // @[Arithmetic.scala:102:66] wire [31:0] _io_out_c_zeros_T_13 = 32'h1 << _io_out_c_zeros_T_12; // @[Arithmetic.scala:102:{60,66}] wire [32:0] _io_out_c_zeros_T_14 = {1'h0, _io_out_c_zeros_T_13} - 33'h1; // @[Arithmetic.scala:102:{60,81}] wire [31:0] _io_out_c_zeros_T_15 = _io_out_c_zeros_T_14[31:0]; // @[Arithmetic.scala:102:81] wire [31:0] _io_out_c_zeros_T_16 = {24'h0, _io_out_c_zeros_T_15[7:0] & _io_out_c_zeros_T_10}; // @[Arithmetic.scala:102:{45,52,81}] wire [31:0] _io_out_c_zeros_T_17 = _io_out_c_zeros_T_9 ? 
32'h0 : _io_out_c_zeros_T_16; // @[Arithmetic.scala:102:{24,27,52}] wire io_out_c_zeros_1 = |_io_out_c_zeros_T_17; // @[Arithmetic.scala:102:{24,89}] wire [7:0] _GEN_4 = $signed($signed(c2) >>> _GEN_2); // @[PE.scala:71:15] wire [7:0] _io_out_c_ones_digit_T_1; // @[Arithmetic.scala:103:30] assign _io_out_c_ones_digit_T_1 = _GEN_4; // @[Arithmetic.scala:103:30] wire [7:0] _io_out_c_T_11; // @[Arithmetic.scala:107:15] assign _io_out_c_T_11 = _GEN_4; // @[Arithmetic.scala:103:30, :107:15] wire io_out_c_ones_digit_1 = _io_out_c_ones_digit_T_1[0]; // @[Arithmetic.scala:103:30] wire _io_out_c_r_T_2 = io_out_c_zeros_1 | io_out_c_ones_digit_1; // @[Arithmetic.scala:102:89, :103:30, :105:38] wire _io_out_c_r_T_3 = io_out_c_point_five_1 & _io_out_c_r_T_2; // @[Arithmetic.scala:101:29, :105:{29,38}] wire io_out_c_r_1 = _io_out_c_r_T_3; // @[Arithmetic.scala:105:{29,53}] wire [1:0] _io_out_c_T_12 = {1'h0, io_out_c_r_1}; // @[Arithmetic.scala:105:53, :107:33] wire [8:0] _io_out_c_T_13 = {_io_out_c_T_11[7], _io_out_c_T_11} + {{7{_io_out_c_T_12[1]}}, _io_out_c_T_12}; // @[Arithmetic.scala:107:{15,28,33}] wire [7:0] _io_out_c_T_14 = _io_out_c_T_13[7:0]; // @[Arithmetic.scala:107:28] wire [7:0] _io_out_c_T_15 = _io_out_c_T_14; // @[Arithmetic.scala:107:28] wire [19:0] _io_out_c_T_18 = {{12{_io_out_c_T_15[7]}}, _io_out_c_T_15}; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_19 = _io_out_c_T_18; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_20 = _io_out_c_T_19; // @[Mux.scala:126:16] wire [19:0] _io_out_c_T_21 = _io_out_c_T_20; // @[Arithmetic.scala:125:{81,99}] wire [19:0] _mac_unit_io_in_b_T_3 = _mac_unit_io_in_b_T_2; // @[PE.scala:113:37] wire [7:0] _mac_unit_io_in_b_WIRE_1 = _mac_unit_io_in_b_T_3[7:0]; // @[PE.scala:113:37] wire [7:0] _c2_T_1 = _c2_T; // @[Arithmetic.scala:114:{15,33}] wire [7:0] _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign _mac_unit_io_in_b_T_5 = _mac_unit_io_in_b_T_4; // @[PE.scala:121:38] wire [7:0] _mac_unit_io_in_b_WIRE_2 = _mac_unit_io_in_b_T_5; // @[PE.scala:121:38] assign io_out_c_0 = io_in_control_propagate_0 ? {{12{c1[7]}}, c1} : {{12{c2[7]}}, c2}; // @[PE.scala:31:7, :70:15, :71:15, :119:30, :120:16, :126:16] wire [7:0] _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] assign _mac_unit_io_in_b_T_7 = _mac_unit_io_in_b_T_6; // @[PE.scala:127:38] wire [7:0] _mac_unit_io_in_b_WIRE_3 = _mac_unit_io_in_b_T_7; // @[PE.scala:127:38] wire [19:0] _mac_unit_io_in_b_T_9 = _mac_unit_io_in_b_T_8; // @[PE.scala:137:35] wire [7:0] _mac_unit_io_in_b_WIRE_4 = _mac_unit_io_in_b_T_9[7:0]; // @[PE.scala:137:35] always @(posedge clock) begin // @[PE.scala:31:7] if (io_in_valid_0 & io_in_control_propagate_0) // @[PE.scala:31:7, :102:95, :141:17, :142:8] c1 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :70:15] if (~(~io_in_valid_0 | io_in_control_propagate_0)) // @[PE.scala:31:7, :71:15, :102:95, :119:30, :130:10, :141:{9,17}, :143:8] c2 <= io_in_d_0[7:0]; // @[PE.scala:31:7, :71:15] if (io_in_valid_0) // @[PE.scala:31:7] last_s <= io_in_control_propagate_0; // @[PE.scala:31:7, :89:25] always @(posedge) MacUnit_86 mac_unit ( // @[PE.scala:64:24] .clock (clock), .reset (reset), .io_in_a (io_in_a_0), // @[PE.scala:31:7] .io_in_b (io_in_control_propagate_0 ? 
_mac_unit_io_in_b_WIRE_2 : _mac_unit_io_in_b_WIRE_3), // @[PE.scala:31:7, :119:30, :121:{24,38}, :127:{24,38}] .io_in_c (io_in_b_0), // @[PE.scala:31:7] .io_out_d (io_out_b_0) ); // @[PE.scala:64:24] assign io_out_a = io_out_a_0; // @[PE.scala:31:7] assign io_out_b = io_out_b_0; // @[PE.scala:31:7] assign io_out_c = io_out_c_0; // @[PE.scala:31:7] assign io_out_control_dataflow = io_out_control_dataflow_0; // @[PE.scala:31:7] assign io_out_control_propagate = io_out_control_propagate_0; // @[PE.scala:31:7] assign io_out_control_shift = io_out_control_shift_0; // @[PE.scala:31:7] assign io_out_id = io_out_id_0; // @[PE.scala:31:7] assign io_out_last = io_out_last_0; // @[PE.scala:31:7] assign io_out_valid = io_out_valid_0; // @[PE.scala:31:7] endmodule
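PE_342 above is the weight-stationary specialisation of PE.scala: c1 and c2 double-buffer an 8-bit stationary value, the shared MAC unit drives out_b, and out_c drains whichever register propagate selects. The plain-Scala behavioural sketch below (the class name WsPeModel is hypothetical) mirrors the weight-stationary branch of PE.scala; it omits the in_valid gating and the out_c shifting/clipping used by the output-stationary dataflow.

// Behavioural model of the weight-stationary branch of PE.scala (sketch only).
class WsPeModel {
  private var c1, c2 = 0 // double-buffered stationary values, as in PE.scala

  // One clock step of the .elsewhen(WEIGHT_STATIONARY) branch; returns (out_c, out_b).
  def step(a: Int, b: Int, d: Int, propagate: Boolean): (Int, Int) = {
    if (propagate) {
      val outB = a * c2 + b // mac_unit: in_a = a, in_b = c2, in_c = b
      val outC = c1         // drain c1 while it is reloaded
      c1 = d                // load the next stationary value
      (outC, outB)
    } else {
      val outB = a * c1 + b
      val outC = c2
      c2 = d
      (outC, outB)
    }
  }
}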
Generate the Verilog code corresponding to the following Chisel files. File Buffer.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.BufferParams class TLBufferNode ( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit valName: ValName) extends TLAdapterNode( clientFn = { p => p.v1copy(minLatency = p.minLatency + b.latency + c.latency) }, managerFn = { p => p.v1copy(minLatency = p.minLatency + a.latency + d.latency) } ) { override lazy val nodedebugstring = s"a:${a.toString}, b:${b.toString}, c:${c.toString}, d:${d.toString}, e:${e.toString}" override def circuitIdentity = List(a,b,c,d,e).forall(_ == BufferParams.none) } class TLBuffer( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters) extends LazyModule { def this(ace: BufferParams, bd: BufferParams)(implicit p: Parameters) = this(ace, bd, ace, bd, ace) def this(abcde: BufferParams)(implicit p: Parameters) = this(abcde, abcde) def this()(implicit p: Parameters) = this(BufferParams.default) val node = new TLBufferNode(a, b, c, d, e) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def headBundle = node.out.head._2.bundle override def desiredName = (Seq("TLBuffer") ++ node.out.headOption.map(_._2.bundle.shortName)).mkString("_") (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.a <> a(in .a) in .d <> d(out.d) if (edgeOut.manager.anySupportAcquireB && edgeOut.client.anySupportProbe) { in .b <> b(out.b) out.c <> c(in .c) out.e <> e(in .e) } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLBuffer { def apply() (implicit p: Parameters): TLNode = apply(BufferParams.default) def apply(abcde: BufferParams) (implicit p: Parameters): TLNode = apply(abcde, abcde) def apply(ace: BufferParams, bd: BufferParams)(implicit p: Parameters): TLNode = apply(ace, bd, ace, bd, ace) def apply( a: BufferParams, b: BufferParams, c: BufferParams, d: BufferParams, e: BufferParams)(implicit p: Parameters): TLNode = { val buffer = LazyModule(new TLBuffer(a, b, c, d, e)) buffer.node } def chain(depth: Int, name: Option[String] = None)(implicit p: Parameters): Seq[TLNode] = { val buffers = Seq.fill(depth) { LazyModule(new TLBuffer()) } name.foreach { n => buffers.zipWithIndex.foreach { case (b, i) => b.suggestName(s"${n}_${i}") } } buffers.map(_.node) } def chainNode(depth: Int, name: Option[String] = None)(implicit p: Parameters): TLNode = { chain(depth, name) .reduceLeftOption(_ :*=* _) .getOrElse(TLNameNode("no_buffer")) } } File AtomicAutomata.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes} import freechips.rocketchip.util.leftOR import scala.math.{min,max} // Ensures that all downstream RW managers support Atomic operations. // If !passthrough, intercept all Atomics. Otherwise, only intercept those unsupported downstream. 
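// Usage sketch (hedged; the TLRAMAtomicAutomata unit test at the bottom of this file
// is the complete, self-contained example): the adapter is spliced into a TileLink
// chain, and an AMO that a downstream manager cannot handle natively is intercepted
// and replayed as a Get followed by a locally computed Put, e.g.
//
//   ram.node := TLFragmenter(4, 256) := TLAtomicAutomata() := model.node := fuzz.node
//
// (a simplified form of the chain used in the test below).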
class TLAtomicAutomata(logical: Boolean = true, arithmetic: Boolean = true, concurrency: Int = 1, passthrough: Boolean = true)(implicit p: Parameters) extends LazyModule { require (concurrency >= 1) val node = TLAdapterNode( managerFn = { case mp => mp.v1copy(managers = mp.managers.map { m => val ourSupport = TransferSizes(1, mp.beatBytes) def widen(x: TransferSizes) = if (passthrough && x.min <= 2*mp.beatBytes) TransferSizes(1, max(mp.beatBytes, x.max)) else ourSupport val canDoit = m.supportsPutFull.contains(ourSupport) && m.supportsGet.contains(ourSupport) // Blow up if there are devices to which we cannot add Atomics, because their R|W are too inflexible require (!m.supportsPutFull || !m.supportsGet || canDoit, s"${m.name} has $ourSupport, needed PutFull(${m.supportsPutFull}) or Get(${m.supportsGet})") m.v1copy( supportsArithmetic = if (!arithmetic || !canDoit) m.supportsArithmetic else widen(m.supportsArithmetic), supportsLogical = if (!logical || !canDoit) m.supportsLogical else widen(m.supportsLogical), mayDenyGet = m.mayDenyGet || m.mayDenyPut) })}) lazy val module = new Impl class Impl extends LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => val managers = edgeOut.manager.managers val beatBytes = edgeOut.manager.beatBytes // To which managers are we adding atomic support? val ourSupport = TransferSizes(1, beatBytes) val managersNeedingHelp = managers.filter { m => m.supportsPutFull.contains(ourSupport) && m.supportsGet.contains(ourSupport) && ((logical && !m.supportsLogical .contains(ourSupport)) || (arithmetic && !m.supportsArithmetic.contains(ourSupport)) || !passthrough) // we will do atomics for everyone we can } // Managers that need help with atomics must necessarily have this node as the root of a tree in the node graph. // (But they must also ensure no sideband operations can get between the read and write.) val violations = managersNeedingHelp.flatMap(_.findTreeViolation()).map { node => (node.name, node.inputs.map(_._1.name)) } require(violations.isEmpty, s"AtomicAutomata can only help nodes for which it is at the root of a diplomatic node tree," + "but the following violations were found:\n" + violations.map(v => s"(${v._1} has parents ${v._2})").mkString("\n")) // We cannot add atomics to a non-FIFO manager managersNeedingHelp foreach { m => require (m.fifoId.isDefined) } // We need to preserve FIFO semantics across FIFO domains, not managers // Suppose you have Put(42) Atomic(+1) both inflight; valid results: 42 or 43 // If we allow Put(42) Get() Put(+1) concurrent; valid results: 42 43 OR undef // Making non-FIFO work requires waiting for all Acks to come back (=> use FIFOFixer) val domainsNeedingHelp = managersNeedingHelp.map(_.fifoId.get).distinct // Don't overprovision the CAM val camSize = min(domainsNeedingHelp.size, concurrency) // Compact the fifoIds to only those we care about def camFifoId(m: TLSlaveParameters) = m.fifoId.map(id => max(0, domainsNeedingHelp.indexOf(id))).getOrElse(0) // CAM entry state machine val FREE = 0.U // unused waiting on Atomic from A val GET = 3.U // Get sent down A waiting on AccessDataAck from D val AMO = 2.U // AccessDataAck sent up D waiting for A availability val ACK = 1.U // Put sent down A waiting for PutAck from D val params = TLAtomicAutomata.CAMParams(out.a.bits.params, domainsNeedingHelp.size) // Do we need to do anything at all? 
if (camSize > 0) { val initval = Wire(new TLAtomicAutomata.CAM_S(params)) initval.state := FREE val cam_s = RegInit(VecInit.fill(camSize)(initval)) val cam_a = Reg(Vec(camSize, new TLAtomicAutomata.CAM_A(params))) val cam_d = Reg(Vec(camSize, new TLAtomicAutomata.CAM_D(params))) val cam_free = cam_s.map(_.state === FREE) val cam_amo = cam_s.map(_.state === AMO) val cam_abusy = cam_s.map(e => e.state === GET || e.state === AMO) // A is blocked val cam_dmatch = cam_s.map(e => e.state =/= FREE) // D should inspect these entries // Can the manager already handle this message? val a_address = edgeIn.address(in.a.bits) val a_size = edgeIn.size(in.a.bits) val a_canLogical = passthrough.B && edgeOut.manager.supportsLogicalFast (a_address, a_size) val a_canArithmetic = passthrough.B && edgeOut.manager.supportsArithmeticFast(a_address, a_size) val a_isLogical = in.a.bits.opcode === TLMessages.LogicalData val a_isArithmetic = in.a.bits.opcode === TLMessages.ArithmeticData val a_isSupported = Mux(a_isLogical, a_canLogical, Mux(a_isArithmetic, a_canArithmetic, true.B)) // Must we do a Put? val a_cam_any_put = cam_amo.reduce(_ || _) val a_cam_por_put = cam_amo.scanLeft(false.B)(_||_).init val a_cam_sel_put = (cam_amo zip a_cam_por_put) map { case (a, b) => a && !b } val a_cam_a = PriorityMux(cam_amo, cam_a) val a_cam_d = PriorityMux(cam_amo, cam_d) val a_a = a_cam_a.bits.data val a_d = a_cam_d.data // Does the A request conflict with an inflight AMO? val a_fifoId = edgeOut.manager.fastProperty(a_address, camFifoId _, (i:Int) => i.U) val a_cam_busy = (cam_abusy zip cam_a.map(_.fifoId === a_fifoId)) map { case (a,b) => a&&b } reduce (_||_) // (Where) are we are allocating in the CAM? val a_cam_any_free = cam_free.reduce(_ || _) val a_cam_por_free = cam_free.scanLeft(false.B)(_||_).init val a_cam_sel_free = (cam_free zip a_cam_por_free) map { case (a,b) => a && !b } // Logical AMO val indexes = Seq.tabulate(beatBytes*8) { i => Cat(a_a(i,i), a_d(i,i)) } val logic_out = Cat(indexes.map(x => a_cam_a.lut(x).asUInt).reverse) // Arithmetic AMO val unsigned = a_cam_a.bits.param(1) val take_max = a_cam_a.bits.param(0) val adder = a_cam_a.bits.param(2) val mask = a_cam_a.bits.mask val signSel = ~(~mask | (mask >> 1)) val signbits_a = Cat(Seq.tabulate(beatBytes) { i => a_a(8*i+7,8*i+7) } .reverse) val signbits_d = Cat(Seq.tabulate(beatBytes) { i => a_d(8*i+7,8*i+7) } .reverse) // Move the selected sign bit into the first byte position it will extend val signbit_a = ((signbits_a & signSel) << 1)(beatBytes-1, 0) val signbit_d = ((signbits_d & signSel) << 1)(beatBytes-1, 0) val signext_a = FillInterleaved(8, leftOR(signbit_a)) val signext_d = FillInterleaved(8, leftOR(signbit_d)) // NOTE: sign-extension does not change the relative ordering in EITHER unsigned or signed arithmetic val wide_mask = FillInterleaved(8, mask) val a_a_ext = (a_a & wide_mask) | signext_a val a_d_ext = (a_d & wide_mask) | signext_d val a_d_inv = Mux(adder, a_d_ext, ~a_d_ext) val adder_out = a_a_ext + a_d_inv val h = 8*beatBytes-1 // now sign-extended; use biggest bit val a_bigger_uneq = unsigned === a_a_ext(h) // result if high bits are unequal val a_bigger = Mux(a_a_ext(h) === a_d_ext(h), !adder_out(h), a_bigger_uneq) val pick_a = take_max === a_bigger val arith_out = Mux(adder, adder_out, Mux(pick_a, a_a, a_d)) // AMO result data val amo_data = if (!logical) arith_out else if (!arithmetic) logic_out else Mux(a_cam_a.bits.opcode(0), logic_out, arith_out) // Potentially mutate the message from inner val source_i = Wire(chiselTypeOf(in.a)) val 
a_allow = !a_cam_busy && (a_isSupported || a_cam_any_free) in.a.ready := source_i.ready && a_allow source_i.valid := in.a.valid && a_allow source_i.bits := in.a.bits when (!a_isSupported) { // minimal mux difference source_i.bits.opcode := TLMessages.Get source_i.bits.param := 0.U } // Potentially take the message from the CAM val source_c = Wire(chiselTypeOf(in.a)) source_c.valid := a_cam_any_put source_c.bits := edgeOut.Put( fromSource = a_cam_a.bits.source, toAddress = edgeIn.address(a_cam_a.bits), lgSize = a_cam_a.bits.size, data = amo_data, corrupt = a_cam_a.bits.corrupt || a_cam_d.corrupt)._2 source_c.bits.user :<= a_cam_a.bits.user source_c.bits.echo :<= a_cam_a.bits.echo // Finishing an AMO from the CAM has highest priority TLArbiter(TLArbiter.lowestIndexFirst)(out.a, (0.U, source_c), (edgeOut.numBeats1(in.a.bits), source_i)) // Capture the A state into the CAM when (source_i.fire && !a_isSupported) { (a_cam_sel_free zip cam_a) foreach { case (en, r) => when (en) { r.fifoId := a_fifoId r.bits := in.a.bits r.lut := MuxLookup(in.a.bits.param(1, 0), 0.U(4.W))(Array( TLAtomics.AND -> 0x8.U, TLAtomics.OR -> 0xe.U, TLAtomics.XOR -> 0x6.U, TLAtomics.SWAP -> 0xc.U)) } } (a_cam_sel_free zip cam_s) foreach { case (en, r) => when (en) { r.state := GET } } } // Advance the put state when (source_c.fire) { (a_cam_sel_put zip cam_s) foreach { case (en, r) => when (en) { r.state := ACK } } } // We need to deal with a potential D response in the same cycle as the A request val d_first = edgeOut.first(out.d) val d_cam_sel_raw = cam_a.map(_.bits.source === in.d.bits.source) val d_cam_sel_match = (d_cam_sel_raw zip cam_dmatch) map { case (a,b) => a&&b } val d_cam_data = Mux1H(d_cam_sel_match, cam_d.map(_.data)) val d_cam_denied = Mux1H(d_cam_sel_match, cam_d.map(_.denied)) val d_cam_corrupt = Mux1H(d_cam_sel_match, cam_d.map(_.corrupt)) val d_cam_sel_bypass = if (edgeOut.manager.minLatency > 0) false.B else out.d.bits.source === in.a.bits.source && in.a.valid && !a_isSupported val d_cam_sel = (a_cam_sel_free zip d_cam_sel_match) map { case (a,d) => Mux(d_cam_sel_bypass, a, d) } val d_cam_sel_any = d_cam_sel_bypass || d_cam_sel_match.reduce(_ || _) val d_ackd = out.d.bits.opcode === TLMessages.AccessAckData val d_ack = out.d.bits.opcode === TLMessages.AccessAck when (out.d.fire && d_first) { (d_cam_sel zip cam_d) foreach { case (en, r) => when (en && d_ackd) { r.data := out.d.bits.data r.denied := out.d.bits.denied r.corrupt := out.d.bits.corrupt } } (d_cam_sel zip cam_s) foreach { case (en, r) => when (en) { // Note: it is important that this comes AFTER the := GET, so we can go FREE=>GET=>AMO in one cycle r.state := Mux(d_ackd, AMO, FREE) } } } val d_drop = d_first && d_ackd && d_cam_sel_any val d_replace = d_first && d_ack && d_cam_sel_match.reduce(_ || _) in.d.valid := out.d.valid && !d_drop out.d.ready := in.d.ready || d_drop in.d.bits := out.d.bits when (d_replace) { // minimal muxes in.d.bits.opcode := TLMessages.AccessAckData in.d.bits.data := d_cam_data in.d.bits.corrupt := d_cam_corrupt || out.d.bits.denied in.d.bits.denied := d_cam_denied || out.d.bits.denied } } else { out.a.valid := in.a.valid in.a.ready := out.a.ready out.a.bits := in.a.bits in.d.valid := out.d.valid out.d.ready := in.d.ready in.d.bits := out.d.bits } if (edgeOut.manager.anySupportAcquireB && edgeIn.client.anySupportProbe) { in.b.valid := out.b.valid out.b.ready := in.b.ready in.b.bits := out.b.bits out.c.valid := in.c.valid in.c.ready := out.c.ready out.c.bits := in.c.bits out.e.valid := in.e.valid in.e.ready := 
out.e.ready out.e.bits := in.e.bits } else { in.b.valid := false.B in.c.ready := true.B in.e.ready := true.B out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B } } } } object TLAtomicAutomata { def apply(logical: Boolean = true, arithmetic: Boolean = true, concurrency: Int = 1, passthrough: Boolean = true, nameSuffix: Option[String] = None)(implicit p: Parameters): TLNode = { val atomics = LazyModule(new TLAtomicAutomata(logical, arithmetic, concurrency, passthrough) { override lazy val desiredName = (Seq("TLAtomicAutomata") ++ nameSuffix).mkString("_") }) atomics.node } case class CAMParams(a: TLBundleParameters, domainsNeedingHelp: Int) class CAM_S(val params: CAMParams) extends Bundle { val state = UInt(2.W) } class CAM_A(val params: CAMParams) extends Bundle { val bits = new TLBundleA(params.a) val fifoId = UInt(log2Up(params.domainsNeedingHelp).W) val lut = UInt(4.W) } class CAM_D(val params: CAMParams) extends Bundle { val data = UInt(params.a.dataBits.W) val denied = Bool() val corrupt = Bool() } } // Synthesizable unit tests import freechips.rocketchip.unittest._ class TLRAMAtomicAutomata(txns: Int)(implicit p: Parameters) extends LazyModule { val fuzz = LazyModule(new TLFuzzer(txns)) val model = LazyModule(new TLRAMModel("AtomicAutomata")) val ram = LazyModule(new TLRAM(AddressSet(0x0, 0x3ff))) // Confirm that the AtomicAutomata combines read + write errors import TLMessages._ val test = new RequestPattern({a: TLBundleA => val doesA = a.opcode === ArithmeticData || a.opcode === LogicalData val doesR = a.opcode === Get || doesA val doesW = a.opcode === PutFullData || a.opcode === PutPartialData || doesA (doesR && RequestPattern.overlaps(Seq(AddressSet(0x08, ~0x08)))(a)) || (doesW && RequestPattern.overlaps(Seq(AddressSet(0x10, ~0x10)))(a)) }) (ram.node := TLErrorEvaluator(test) := TLFragmenter(4, 256) := TLDelayer(0.1) := TLAtomicAutomata() := TLDelayer(0.1) := TLErrorEvaluator(test, testOn=true, testOff=true) := model.node := fuzz.node) lazy val module = new Impl class Impl extends LazyModuleImp(this) with UnitTestModule { io.finished := fuzz.module.io.finished } } class TLRAMAtomicAutomataTest(txns: Int = 5000, timeout: Int = 500000)(implicit p: Parameters) extends UnitTest(timeout) { val dut = Module(LazyModule(new TLRAMAtomicAutomata(txns)).module) io.finished := dut.io.finished dut.io.start := io.start } File Nodes.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.tilelink import chisel3._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.util.{AsyncQueueParams,RationalDirection} case object TLMonitorBuilder extends Field[TLMonitorArgs => TLMonitorBase](args => new TLMonitor(args)) object TLImp extends NodeImp[TLMasterPortParameters, TLSlavePortParameters, TLEdgeOut, TLEdgeIn, TLBundle] { def edgeO(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeOut(pd, pu, p, sourceInfo) def edgeI(pd: TLMasterPortParameters, pu: TLSlavePortParameters, p: Parameters, sourceInfo: SourceInfo) = new TLEdgeIn (pd, pu, p, sourceInfo) def bundleO(eo: TLEdgeOut) = TLBundle(eo.bundle) def bundleI(ei: TLEdgeIn) = TLBundle(ei.bundle) def render(ei: TLEdgeIn) = RenderedEdge(colour = "#000000" /* black */, label = (ei.manager.beatBytes * 8).toString) override def monitor(bundle: TLBundle, edge: TLEdgeIn): Unit = { val monitor = Module(edge.params(TLMonitorBuilder)(TLMonitorArgs(edge))) monitor.io.in := bundle } override def mixO(pd: TLMasterPortParameters, node: OutwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLMasterPortParameters = pd.v1copy(clients = pd.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) }) override def mixI(pu: TLSlavePortParameters, node: InwardNode[TLMasterPortParameters, TLSlavePortParameters, TLBundle]): TLSlavePortParameters = pu.v1copy(managers = pu.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) }) } trait TLFormatNode extends FormatNode[TLEdgeIn, TLEdgeOut] case class TLClientNode(portParams: Seq[TLMasterPortParameters])(implicit valName: ValName) extends SourceNode(TLImp)(portParams) with TLFormatNode case class TLManagerNode(portParams: Seq[TLSlavePortParameters])(implicit valName: ValName) extends SinkNode(TLImp)(portParams) with TLFormatNode case class TLAdapterNode( clientFn: TLMasterPortParameters => TLMasterPortParameters = { s => s }, managerFn: TLSlavePortParameters => TLSlavePortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLJunctionNode( clientFn: Seq[TLMasterPortParameters] => Seq[TLMasterPortParameters], managerFn: Seq[TLSlavePortParameters] => Seq[TLSlavePortParameters])( implicit valName: ValName) extends JunctionNode(TLImp)(clientFn, managerFn) with TLFormatNode case class TLIdentityNode()(implicit valName: ValName) extends IdentityNode(TLImp)() with TLFormatNode object TLNameNode { def apply(name: ValName) = TLIdentityNode()(name) def apply(name: Option[String]): TLIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLIdentityNode = apply(Some(name)) } case class TLEphemeralNode()(implicit valName: ValName) extends EphemeralNode(TLImp)() object TLTempNode { def apply(): TLEphemeralNode = TLEphemeralNode()(ValName("temp")) } case class TLNexusNode( clientFn: Seq[TLMasterPortParameters] => TLMasterPortParameters, managerFn: Seq[TLSlavePortParameters] => TLSlavePortParameters)( implicit valName: ValName) extends NexusNode(TLImp)(clientFn, managerFn) with TLFormatNode abstract class TLCustomNode(implicit valName: ValName) extends CustomNode(TLImp) with TLFormatNode // Asynchronous crossings trait TLAsyncFormatNode extends FormatNode[TLAsyncEdgeParameters, TLAsyncEdgeParameters] object TLAsyncImp extends SimpleNodeImp[TLAsyncClientPortParameters, 
TLAsyncManagerPortParameters, TLAsyncEdgeParameters, TLAsyncBundle] { def edge(pd: TLAsyncClientPortParameters, pu: TLAsyncManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLAsyncEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLAsyncEdgeParameters) = new TLAsyncBundle(e.bundle) def render(e: TLAsyncEdgeParameters) = RenderedEdge(colour = "#ff0000" /* red */, label = e.manager.async.depth.toString) override def mixO(pd: TLAsyncClientPortParameters, node: OutwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLAsyncManagerPortParameters, node: InwardNode[TLAsyncClientPortParameters, TLAsyncManagerPortParameters, TLAsyncBundle]): TLAsyncManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLAsyncAdapterNode( clientFn: TLAsyncClientPortParameters => TLAsyncClientPortParameters = { s => s }, managerFn: TLAsyncManagerPortParameters => TLAsyncManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLAsyncImp)(clientFn, managerFn) with TLAsyncFormatNode case class TLAsyncIdentityNode()(implicit valName: ValName) extends IdentityNode(TLAsyncImp)() with TLAsyncFormatNode object TLAsyncNameNode { def apply(name: ValName) = TLAsyncIdentityNode()(name) def apply(name: Option[String]): TLAsyncIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLAsyncIdentityNode = apply(Some(name)) } case class TLAsyncSourceNode(sync: Option[Int])(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLAsyncImp)( dFn = { p => TLAsyncClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = p.base.minLatency + sync.getOrElse(p.async.sync)) }) with FormatNode[TLEdgeIn, TLAsyncEdgeParameters] // discard cycles in other clock domain case class TLAsyncSinkNode(async: AsyncQueueParams)(implicit valName: ValName) extends MixedAdapterNode(TLAsyncImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = p.base.minLatency + async.sync) }, uFn = { p => TLAsyncManagerPortParameters(async, p) }) with FormatNode[TLAsyncEdgeParameters, TLEdgeOut] // Rationally related crossings trait TLRationalFormatNode extends FormatNode[TLRationalEdgeParameters, TLRationalEdgeParameters] object TLRationalImp extends SimpleNodeImp[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalEdgeParameters, TLRationalBundle] { def edge(pd: TLRationalClientPortParameters, pu: TLRationalManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLRationalEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLRationalEdgeParameters) = new TLRationalBundle(e.bundle) def render(e: TLRationalEdgeParameters) = RenderedEdge(colour = "#00ff00" /* green */) override def mixO(pd: TLRationalClientPortParameters, node: OutwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLRationalManagerPortParameters, node: InwardNode[TLRationalClientPortParameters, TLRationalManagerPortParameters, TLRationalBundle]): TLRationalManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case 
class TLRationalAdapterNode( clientFn: TLRationalClientPortParameters => TLRationalClientPortParameters = { s => s }, managerFn: TLRationalManagerPortParameters => TLRationalManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLRationalImp)(clientFn, managerFn) with TLRationalFormatNode case class TLRationalIdentityNode()(implicit valName: ValName) extends IdentityNode(TLRationalImp)() with TLRationalFormatNode object TLRationalNameNode { def apply(name: ValName) = TLRationalIdentityNode()(name) def apply(name: Option[String]): TLRationalIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLRationalIdentityNode = apply(Some(name)) } case class TLRationalSourceNode()(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLRationalImp)( dFn = { p => TLRationalClientPortParameters(p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLRationalEdgeParameters] // discard cycles from other clock domain case class TLRationalSinkNode(direction: RationalDirection)(implicit valName: ValName) extends MixedAdapterNode(TLRationalImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLRationalManagerPortParameters(direction, p) }) with FormatNode[TLRationalEdgeParameters, TLEdgeOut] // Credited version of TileLink channels trait TLCreditedFormatNode extends FormatNode[TLCreditedEdgeParameters, TLCreditedEdgeParameters] object TLCreditedImp extends SimpleNodeImp[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedEdgeParameters, TLCreditedBundle] { def edge(pd: TLCreditedClientPortParameters, pu: TLCreditedManagerPortParameters, p: Parameters, sourceInfo: SourceInfo) = TLCreditedEdgeParameters(pd, pu, p, sourceInfo) def bundle(e: TLCreditedEdgeParameters) = new TLCreditedBundle(e.bundle) def render(e: TLCreditedEdgeParameters) = RenderedEdge(colour = "#ffff00" /* yellow */, e.delay.toString) override def mixO(pd: TLCreditedClientPortParameters, node: OutwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedClientPortParameters = pd.copy(base = pd.base.v1copy(clients = pd.base.clients.map { c => c.v1copy (nodePath = node +: c.nodePath) })) override def mixI(pu: TLCreditedManagerPortParameters, node: InwardNode[TLCreditedClientPortParameters, TLCreditedManagerPortParameters, TLCreditedBundle]): TLCreditedManagerPortParameters = pu.copy(base = pu.base.v1copy(managers = pu.base.managers.map { m => m.v1copy (nodePath = node +: m.nodePath) })) } case class TLCreditedAdapterNode( clientFn: TLCreditedClientPortParameters => TLCreditedClientPortParameters = { s => s }, managerFn: TLCreditedManagerPortParameters => TLCreditedManagerPortParameters = { s => s })( implicit valName: ValName) extends AdapterNode(TLCreditedImp)(clientFn, managerFn) with TLCreditedFormatNode case class TLCreditedIdentityNode()(implicit valName: ValName) extends IdentityNode(TLCreditedImp)() with TLCreditedFormatNode object TLCreditedNameNode { def apply(name: ValName) = TLCreditedIdentityNode()(name) def apply(name: Option[String]): TLCreditedIdentityNode = apply(ValName(name.getOrElse("with_no_name"))) def apply(name: String): TLCreditedIdentityNode = apply(Some(name)) } case class TLCreditedSourceNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLImp, TLCreditedImp)( dFn = { p => TLCreditedClientPortParameters(delay, p) }, uFn = { p => p.base.v1copy(minLatency = 1) }) with FormatNode[TLEdgeIn, TLCreditedEdgeParameters] 
// discard cycles from other clock domain case class TLCreditedSinkNode(delay: TLCreditedDelay)(implicit valName: ValName) extends MixedAdapterNode(TLCreditedImp, TLImp)( dFn = { p => p.base.v1copy(minLatency = 1) }, uFn = { p => TLCreditedManagerPortParameters(delay, p) }) with FormatNode[TLCreditedEdgeParameters, TLEdgeOut] File RegisterRouter.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.diplomacy.{AddressSet, TransferSizes} import freechips.rocketchip.resources.{Device, Resource, ResourceBindings} import freechips.rocketchip.prci.{NoCrossing} import freechips.rocketchip.regmapper.{RegField, RegMapper, RegMapperParams, RegMapperInput, RegisterRouter} import freechips.rocketchip.util.{BundleField, ControlKey, ElaborationArtefacts, GenRegDescsAnno} import scala.math.min class TLRegisterRouterExtraBundle(val sourceBits: Int, val sizeBits: Int) extends Bundle { val source = UInt((sourceBits max 1).W) val size = UInt((sizeBits max 1).W) } case object TLRegisterRouterExtra extends ControlKey[TLRegisterRouterExtraBundle]("tlrr_extra") case class TLRegisterRouterExtraField(sourceBits: Int, sizeBits: Int) extends BundleField[TLRegisterRouterExtraBundle](TLRegisterRouterExtra, Output(new TLRegisterRouterExtraBundle(sourceBits, sizeBits)), x => { x.size := 0.U x.source := 0.U }) /** TLRegisterNode is a specialized TL SinkNode that encapsulates MMIO registers. * It provides functionality for describing and outputting metdata about the registers in several formats. * It also provides a concrete implementation of a regmap function that will be used * to wire a map of internal registers associated with this node to the node's interconnect port. */ case class TLRegisterNode( address: Seq[AddressSet], device: Device, deviceKey: String = "reg/control", concurrency: Int = 0, beatBytes: Int = 4, undefZero: Boolean = true, executable: Boolean = false)( implicit valName: ValName) extends SinkNode(TLImp)(Seq(TLSlavePortParameters.v1( Seq(TLSlaveParameters.v1( address = address, resources = Seq(Resource(device, deviceKey)), executable = executable, supportsGet = TransferSizes(1, beatBytes), supportsPutPartial = TransferSizes(1, beatBytes), supportsPutFull = TransferSizes(1, beatBytes), fifoId = Some(0))), // requests are handled in order beatBytes = beatBytes, minLatency = min(concurrency, 1)))) with TLFormatNode // the Queue adds at most one cycle { val size = 1 << log2Ceil(1 + address.map(_.max).max - address.map(_.base).min) require (size >= beatBytes) address.foreach { case a => require (a.widen(size-1).base == address.head.widen(size-1).base, s"TLRegisterNode addresses (${address}) must be aligned to its size ${size}") } // Calling this method causes the matching TL2 bundle to be // configured to route all requests to the listed RegFields. 
def regmap(mapping: RegField.Map*) = { val (bundleIn, edge) = this.in(0) val a = bundleIn.a val d = bundleIn.d val fields = TLRegisterRouterExtraField(edge.bundle.sourceBits, edge.bundle.sizeBits) +: a.bits.params.echoFields val params = RegMapperParams(log2Up(size/beatBytes), beatBytes, fields) val in = Wire(Decoupled(new RegMapperInput(params))) in.bits.read := a.bits.opcode === TLMessages.Get in.bits.index := edge.addr_hi(a.bits) in.bits.data := a.bits.data in.bits.mask := a.bits.mask Connectable.waiveUnmatched(in.bits.extra, a.bits.echo) match { case (lhs, rhs) => lhs :<= rhs } val a_extra = in.bits.extra(TLRegisterRouterExtra) a_extra.source := a.bits.source a_extra.size := a.bits.size // Invoke the register map builder val out = RegMapper(beatBytes, concurrency, undefZero, in, mapping:_*) // No flow control needed in.valid := a.valid a.ready := in.ready d.valid := out.valid out.ready := d.ready // We must restore the size to enable width adapters to work val d_extra = out.bits.extra(TLRegisterRouterExtra) d.bits := edge.AccessAck(toSource = d_extra.source, lgSize = d_extra.size) // avoid a Mux on the data bus by manually overriding two fields d.bits.data := out.bits.data Connectable.waiveUnmatched(d.bits.echo, out.bits.extra) match { case (lhs, rhs) => lhs :<= rhs } d.bits.opcode := Mux(out.bits.read, TLMessages.AccessAckData, TLMessages.AccessAck) // Tie off unused channels bundleIn.b.valid := false.B bundleIn.c.ready := true.B bundleIn.e.ready := true.B genRegDescsJson(mapping:_*) } def genRegDescsJson(mapping: RegField.Map*): Unit = { // Dump out the register map for documentation purposes. val base = address.head.base val baseHex = s"0x${base.toInt.toHexString}" val name = s"${device.describe(ResourceBindings()).name}.At${baseHex}" val json = GenRegDescsAnno.serialize(base, name, mapping:_*) var suffix = 0 while( ElaborationArtefacts.contains(s"${baseHex}.${suffix}.regmap.json")) { suffix = suffix + 1 } ElaborationArtefacts.add(s"${baseHex}.${suffix}.regmap.json", json) val module = Module.currentModule.get.asInstanceOf[RawModule] GenRegDescsAnno.anno( module, base, mapping:_*) } } /** Mix HasTLControlRegMap into any subclass of RegisterRouter to gain helper functions for attaching a device control register map to TileLink. * - The intended use case is that controlNode will diplomatically publish a SW-visible device's memory-mapped control registers. * - Use the clock crossing helper controlXing to externally connect controlNode to a TileLink interconnect. * - Use the mapping helper function regmap to internally fill out the space of device control registers. 
*/ trait HasTLControlRegMap { this: RegisterRouter => protected val controlNode = TLRegisterNode( address = address, device = device, deviceKey = "reg/control", concurrency = concurrency, beatBytes = beatBytes, undefZero = undefZero, executable = executable) // Externally, this helper should be used to connect the register control port to a bus val controlXing: TLInwardClockCrossingHelper = this.crossIn(controlNode) // Backwards-compatibility default node accessor with no clock crossing lazy val node: TLInwardNode = controlXing(NoCrossing) // Internally, this function should be used to populate the control port with registers protected def regmap(mapping: RegField.Map*): Unit = { controlNode.regmap(mapping:_*) } } File ClockDomain.scala: package freechips.rocketchip.prci import chisel3._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ abstract class Domain(implicit p: Parameters) extends LazyModule with HasDomainCrossing { def clockBundle: ClockBundle lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { childClock := clockBundle.clock childReset := clockBundle.reset override def provideImplicitClockToLazyChildren = true // these are just for backwards compatibility with external devices // that were manually wiring themselves to the domain's clock/reset input: val clock = IO(Output(chiselTypeOf(clockBundle.clock))) val reset = IO(Output(chiselTypeOf(clockBundle.reset))) clock := clockBundle.clock reset := clockBundle.reset } } abstract class ClockDomain(implicit p: Parameters) extends Domain with HasClockDomainCrossing class ClockSinkDomain(val clockSinkParams: ClockSinkParameters)(implicit p: Parameters) extends ClockDomain { def this(take: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSinkParameters(take = take, name = name)) val clockNode = ClockSinkNode(Seq(clockSinkParams)) def clockBundle = clockNode.in.head._1 override lazy val desiredName = (clockSinkParams.name.toSeq :+ "ClockSinkDomain").mkString } class ClockSourceDomain(val clockSourceParams: ClockSourceParameters)(implicit p: Parameters) extends ClockDomain { def this(give: Option[ClockParameters] = None, name: Option[String] = None)(implicit p: Parameters) = this(ClockSourceParameters(give = give, name = name)) val clockNode = ClockSourceNode(Seq(clockSourceParams)) def clockBundle = clockNode.out.head._1 override lazy val desiredName = (clockSourceParams.name.toSeq :+ "ClockSourceDomain").mkString } abstract class ResetDomain(implicit p: Parameters) extends Domain with HasResetDomainCrossing File PeripheryBus.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.subsystem import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.devices.tilelink.{BuiltInZeroDeviceParams, BuiltInErrorDeviceParams, HasBuiltInDeviceParams, BuiltInDevices} import freechips.rocketchip.diplomacy.BufferParams import freechips.rocketchip.tilelink.{ RegionReplicator, ReplicatedRegion, HasTLBusParams, HasRegionReplicatorParams, TLBusWrapper, TLBusWrapperInstantiationLike, TLFIFOFixer, TLNode, TLXbar, TLInwardNode, TLOutwardNode, TLBuffer, TLWidthWidget, TLAtomicAutomata, TLEdge } import freechips.rocketchip.util.Location case class BusAtomics( arithmetic: Boolean = true, buffer: BufferParams = BufferParams.default, widenBytes: Option[Int] = None ) case class PeripheryBusParams( beatBytes: Int, blockBytes: Int, atomics: Option[BusAtomics] = Some(BusAtomics()), dtsFrequency: Option[BigInt] = None, zeroDevice: Option[BuiltInZeroDeviceParams] = None, errorDevice: Option[BuiltInErrorDeviceParams] = None, replication: Option[ReplicatedRegion] = None) extends HasTLBusParams with HasBuiltInDeviceParams with HasRegionReplicatorParams with TLBusWrapperInstantiationLike { def instantiate(context: HasTileLinkLocations, loc: Location[TLBusWrapper])(implicit p: Parameters): PeripheryBus = { val pbus = LazyModule(new PeripheryBus(this, loc.name)) pbus.suggestName(loc.name) context.tlBusWrapperLocationMap += (loc -> pbus) pbus } } class PeripheryBus(params: PeripheryBusParams, name: String)(implicit p: Parameters) extends TLBusWrapper(params, name) { override lazy val desiredName = s"PeripheryBus_$name" private val replicator = params.replication.map(r => LazyModule(new RegionReplicator(r))) val prefixNode = replicator.map { r => r.prefix := addressPrefixNexusNode addressPrefixNexusNode } private val fixer = LazyModule(new TLFIFOFixer(TLFIFOFixer.all)) private val node: TLNode = params.atomics.map { pa => val in_xbar = LazyModule(new TLXbar(nameSuffix = Some(s"${name}_in"))) val out_xbar = LazyModule(new TLXbar(nameSuffix = Some(s"${name}_out"))) val fixer_node = replicator.map(fixer.node :*= _.node).getOrElse(fixer.node) (out_xbar.node :*= fixer_node :*= TLBuffer(pa.buffer) :*= (pa.widenBytes.filter(_ > beatBytes).map { w => TLWidthWidget(w) :*= TLAtomicAutomata(arithmetic = pa.arithmetic, nameSuffix = Some(name)) } .getOrElse { TLAtomicAutomata(arithmetic = pa.arithmetic, nameSuffix = Some(name)) }) :*= in_xbar.node) } .getOrElse { TLXbar() :*= fixer.node } def inwardNode: TLInwardNode = node def outwardNode: TLOutwardNode = node def busView: TLEdge = fixer.node.edges.in.head val builtInDevices: BuiltInDevices = BuiltInDevices.attach(params, outwardNode) } File BootAddrReg.scala: package testchipip.boot import chisel3._ import org.chipsalliance.cde.config.{Parameters, Field} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.devices.tilelink._ import freechips.rocketchip.regmapper._ import freechips.rocketchip.subsystem._ case class BootAddrRegParams( defaultBootAddress: BigInt = 0x80000000L, // This should be DRAM_BASE bootRegAddress: BigInt = 0x1000, slaveWhere: TLBusWrapperLocation = PBUS ) case object BootAddrRegKey extends Field[Option[BootAddrRegParams]](None) trait CanHavePeripheryBootAddrReg { this: BaseSubsystem => p(BootAddrRegKey).map { params => val tlbus = locateTLBusWrapper(params.slaveWhere) val device = new SimpleDevice("boot-address-reg", Nil) tlbus { val node = TLRegisterNode(Seq(AddressSet(params.bootRegAddress, 4096-1)), device, 
"reg/control", beatBytes=tlbus.beatBytes) tlbus.coupleTo("boot-address-reg") { node := TLFragmenter(tlbus, Some("BootAddrReg")) := _ } InModuleBody { val bootAddrReg = RegInit(params.defaultBootAddress.U(64.W)) node.regmap(0 -> RegField.bytes(bootAddrReg)) } } } } File ClockGroup.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.prci import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import org.chipsalliance.diplomacy.lazymodule._ import org.chipsalliance.diplomacy.nodes._ import freechips.rocketchip.resources.FixedClockResource case class ClockGroupingNode(groupName: String)(implicit valName: ValName) extends MixedNexusNode(ClockGroupImp, ClockImp)( dFn = { _ => ClockSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq) }) { override def circuitIdentity = outputs.size == 1 } class ClockGroup(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupingNode(groupName) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip require (node.in.size == 1) require (in.member.size == out.size) (in.member.data zip out) foreach { case (i, o) => o := i } } } object ClockGroup { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroup(valName.name)).node } case class ClockGroupAggregateNode(groupName: String)(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { seq => ClockGroupSinkParameters(name = groupName, members = seq.flatMap(_.members))}) { override def circuitIdentity = outputs.size == 1 } class ClockGroupAggregator(groupName: String)(implicit p: Parameters) extends LazyModule { val node = ClockGroupAggregateNode(groupName) override lazy val desiredName = s"ClockGroupAggregator_$groupName" lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in.unzip val (out, _) = node.out.unzip val outputs = out.flatMap(_.member.data) require (node.in.size == 1, s"Aggregator for groupName: ${groupName} had ${node.in.size} inward edges instead of 1") require (in.head.member.size == outputs.size) in.head.member.data.zip(outputs).foreach { case (i, o) => o := i } } } object ClockGroupAggregator { def apply()(implicit p: Parameters, valName: ValName) = LazyModule(new ClockGroupAggregator(valName.name)).node } class SimpleClockGroupSource(numSources: Int = 1)(implicit p: Parameters) extends LazyModule { val node = ClockGroupSourceNode(List.fill(numSources) { ClockGroupSourceParameters() }) lazy val module = new Impl class Impl extends LazyModuleImp(this) { val (out, _) = node.out.unzip out.map { out: ClockGroupBundle => out.member.data.foreach { o => o.clock := clock; o.reset := reset } } } } object SimpleClockGroupSource { def apply(num: Int = 1)(implicit p: Parameters, valName: ValName) = LazyModule(new SimpleClockGroupSource(num)).node } case class FixedClockBroadcastNode(fixedClockOpt: Option[ClockParameters])(implicit valName: ValName) extends NexusNode(ClockImp)( dFn = { seq => fixedClockOpt.map(_ => ClockSourceParameters(give = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSourceParameters()) }, uFn = { seq => fixedClockOpt.map(_ => ClockSinkParameters(take = fixedClockOpt)).orElse(seq.headOption).getOrElse(ClockSinkParameters()) }, inputRequiresOutput = false) { def fixedClockResources(name: String, prefix: String = "soc/"): Seq[Option[FixedClockResource]] = Seq(fixedClockOpt.map(t => new 
FixedClockResource(name, t.freqMHz, prefix))) } class FixedClockBroadcast(fixedClockOpt: Option[ClockParameters])(implicit p: Parameters) extends LazyModule { val node = new FixedClockBroadcastNode(fixedClockOpt) { override def circuitIdentity = outputs.size == 1 } lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val (in, _) = node.in(0) val (out, _) = node.out.unzip override def desiredName = s"FixedClockBroadcast_${out.size}" require (node.in.size == 1, "FixedClockBroadcast can only broadcast a single clock") out.foreach { _ := in } } } object FixedClockBroadcast { def apply(fixedClockOpt: Option[ClockParameters] = None)(implicit p: Parameters, valName: ValName) = LazyModule(new FixedClockBroadcast(fixedClockOpt)).node } case class PRCIClockGroupNode()(implicit valName: ValName) extends NexusNode(ClockGroupImp)( dFn = { _ => ClockGroupSourceParameters() }, uFn = { _ => ClockGroupSinkParameters("prci", Nil) }, outputRequiresInput = false) File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File LazyScope.scala: package org.chipsalliance.diplomacy.lazymodule import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.ValName /** Allows dynamic creation of [[Module]] hierarchy and "shoving" logic into a [[LazyModule]]. */ trait LazyScope { this: LazyModule => override def toString: String = s"LazyScope named $name" /** Evaluate `body` in the current [[LazyModule.scope]] */ def apply[T](body: => T): T = { // Preserve the previous value of the [[LazyModule.scope]], because when calling [[apply]] function, // [[LazyModule.scope]] will be altered. val saved = LazyModule.scope // [[LazyModule.scope]] stack push. LazyModule.scope = Some(this) // Evaluate [[body]] in the current `scope`, saving the result to [[out]]. val out = body // Check that the `scope` after evaluating `body` is the same as when we started. require(LazyModule.scope.isDefined, s"LazyScope $name tried to exit, but scope was empty!") require( LazyModule.scope.get eq this, s"LazyScope $name exited before LazyModule ${LazyModule.scope.get.name} was closed" ) // [[LazyModule.scope]] stack pop. LazyModule.scope = saved out } } /** Used to automatically create a level of module hierarchy (a [[SimpleLazyModule]]) within which [[LazyModule]]s can * be instantiated and connected. * * It will instantiate a [[SimpleLazyModule]] to manage evaluation of `body` and evaluate `body` code snippets in this * scope. */ object LazyScope { /** Create a [[LazyScope]] with an implicit instance name. * * @param body * code executed within the generated [[SimpleLazyModule]]. * @param valName * instance name of generated [[SimpleLazyModule]]. * @param p * [[Parameters]] propagated to [[SimpleLazyModule]]. */ def apply[T]( body: => T )( implicit valName: ValName, p: Parameters ): T = { apply(valName.value, "SimpleLazyModule", None)(body)(p) } /** Create a [[LazyScope]] with an explicitly defined instance name. * * @param name * instance name of generated [[SimpleLazyModule]]. * @param body * code executed within the generated `SimpleLazyModule` * @param p * [[Parameters]] propagated to [[SimpleLazyModule]]. */ def apply[T]( name: String )(body: => T )( implicit p: Parameters ): T = { apply(name, "SimpleLazyModule", None)(body)(p) } /** Create a [[LazyScope]] with an explicit instance and class name, and control inlining. * * @param name * instance name of generated [[SimpleLazyModule]]. 
* @param desiredModuleName * class name of generated [[SimpleLazyModule]]. * @param overrideInlining * tell FIRRTL that this [[SimpleLazyModule]]'s module should be inlined. * @param body * code executed within the generated `SimpleLazyModule` * @param p * [[Parameters]] propagated to [[SimpleLazyModule]]. */ def apply[T]( name: String, desiredModuleName: String, overrideInlining: Option[Boolean] = None )(body: => T )( implicit p: Parameters ): T = { val scope = LazyModule(new SimpleLazyModule with LazyScope { override lazy val desiredName = desiredModuleName override def shouldBeInlined = overrideInlining.getOrElse(super.shouldBeInlined) }).suggestName(name) scope { body } } /** Create a [[LazyScope]] to temporarily group children for some reason, but tell Firrtl to inline it. * * For example, we might want to control a set of children's clocks but then not keep the parent wrapper. * * @param body * code executed within the generated `SimpleLazyModule` * @param p * [[Parameters]] propagated to [[SimpleLazyModule]]. */ def inline[T]( body: => T )( implicit p: Parameters ): T = { apply("noname", "ShouldBeInlined", Some(false))(body)(p) } }
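The `tlbus { ... }` block in BootAddrReg.scala earlier in this listing is this `apply` method at work: the body is evaluated with the bus wrapper pushed onto LazyModule.scope (the bus wrapper behaves as a LazyScope here), which is why the TLRegisterNode and the TLFragmenter end up instantiated inside the bus's module hierarchy. A minimal, purely illustrative sketch of the same pattern follows; the scope name "regGroup", the TLBuffer child, and the implicit Parameters `p` are assumptions of the sketch, not taken from the files above.

// Illustrative only: evaluate a body inside a named SimpleLazyModule scope.
// Assumes an implicit Parameters `p` (and rocket-chip's TLBuffer class) in scope.
val bufNode = LazyScope("regGroup") {
  val buf = LazyModule(new TLBuffer())  // hypothetical child, for the sketch only
  buf.node                              // the scope returns the body's result
}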
module PeripheryBus_pbus( // @[ClockDomain.scala:14:9] input auto_coupler_to_tailWrite_tl_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_tailWrite_tl_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_tailWrite_tl_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_tailWrite_tl_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_tailWrite_tl_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_coupler_to_tailWrite_tl_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [13:0] auto_coupler_to_tailWrite_tl_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_coupler_to_tailWrite_tl_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_coupler_to_tailWrite_tl_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_tailWrite_tl_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_tailWrite_tl_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_coupler_to_tailWrite_tl_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_coupler_to_tailWrite_tl_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_coupler_to_tailWrite_tl_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_coupler_to_tailWrite_tl_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_coupler_to_tailWrite_tl_out_d_bits_data, // @[LazyModuleImp.scala:107:25] input auto_coupler_to_device_named_uart_0_control_xing_out_a_ready, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_device_named_uart_0_control_xing_out_a_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_device_named_uart_0_control_xing_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25] output [2:0] auto_coupler_to_device_named_uart_0_control_xing_out_a_bits_param, // @[LazyModuleImp.scala:107:25] output [1:0] auto_coupler_to_device_named_uart_0_control_xing_out_a_bits_size, // @[LazyModuleImp.scala:107:25] output [10:0] auto_coupler_to_device_named_uart_0_control_xing_out_a_bits_source, // @[LazyModuleImp.scala:107:25] output [28:0] auto_coupler_to_device_named_uart_0_control_xing_out_a_bits_address, // @[LazyModuleImp.scala:107:25] output [7:0] auto_coupler_to_device_named_uart_0_control_xing_out_a_bits_mask, // @[LazyModuleImp.scala:107:25] output [63:0] auto_coupler_to_device_named_uart_0_control_xing_out_a_bits_data, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_device_named_uart_0_control_xing_out_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] output auto_coupler_to_device_named_uart_0_control_xing_out_d_ready, // @[LazyModuleImp.scala:107:25] input auto_coupler_to_device_named_uart_0_control_xing_out_d_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_coupler_to_device_named_uart_0_control_xing_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25] input [1:0] auto_coupler_to_device_named_uart_0_control_xing_out_d_bits_size, // @[LazyModuleImp.scala:107:25] input [10:0] auto_coupler_to_device_named_uart_0_control_xing_out_d_bits_source, // @[LazyModuleImp.scala:107:25] input [63:0] auto_coupler_to_device_named_uart_0_control_xing_out_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_fixedClockNode_anon_out_1_clock, // @[LazyModuleImp.scala:107:25] output auto_fixedClockNode_anon_out_1_reset, // @[LazyModuleImp.scala:107:25] output auto_fixedClockNode_anon_out_0_clock, // @[LazyModuleImp.scala:107:25] output auto_fixedClockNode_anon_out_0_reset, // 
@[LazyModuleImp.scala:107:25] input auto_pbus_clock_groups_in_member_pbus_0_clock, // @[LazyModuleImp.scala:107:25] input auto_pbus_clock_groups_in_member_pbus_0_reset, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_a_ready, // @[LazyModuleImp.scala:107:25] input auto_bus_xing_in_a_valid, // @[LazyModuleImp.scala:107:25] input [2:0] auto_bus_xing_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25] input [2:0] auto_bus_xing_in_a_bits_param, // @[LazyModuleImp.scala:107:25] input [2:0] auto_bus_xing_in_a_bits_size, // @[LazyModuleImp.scala:107:25] input [6:0] auto_bus_xing_in_a_bits_source, // @[LazyModuleImp.scala:107:25] input [28:0] auto_bus_xing_in_a_bits_address, // @[LazyModuleImp.scala:107:25] input [7:0] auto_bus_xing_in_a_bits_mask, // @[LazyModuleImp.scala:107:25] input [63:0] auto_bus_xing_in_a_bits_data, // @[LazyModuleImp.scala:107:25] input auto_bus_xing_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25] input auto_bus_xing_in_d_ready, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_d_valid, // @[LazyModuleImp.scala:107:25] output [2:0] auto_bus_xing_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25] output [1:0] auto_bus_xing_in_d_bits_param, // @[LazyModuleImp.scala:107:25] output [2:0] auto_bus_xing_in_d_bits_size, // @[LazyModuleImp.scala:107:25] output [6:0] auto_bus_xing_in_d_bits_source, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_d_bits_sink, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_d_bits_denied, // @[LazyModuleImp.scala:107:25] output [63:0] auto_bus_xing_in_d_bits_data, // @[LazyModuleImp.scala:107:25] output auto_bus_xing_in_d_bits_corrupt // @[LazyModuleImp.scala:107:25] ); wire out_woready_7; // @[RegisterRouter.scala:87:24] wire _coupler_to_device_named_uart_0_auto_tl_in_a_ready; // @[LazyScope.scala:98:27] wire _coupler_to_device_named_uart_0_auto_tl_in_d_valid; // @[LazyScope.scala:98:27] wire [2:0] _coupler_to_device_named_uart_0_auto_tl_in_d_bits_opcode; // @[LazyScope.scala:98:27] wire [2:0] _coupler_to_device_named_uart_0_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27] wire [6:0] _coupler_to_device_named_uart_0_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27] wire [63:0] _coupler_to_device_named_uart_0_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27] wire _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_valid; // @[LazyScope.scala:98:27] wire [2:0] _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_opcode; // @[LazyScope.scala:98:27] wire [2:0] _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_param; // @[LazyScope.scala:98:27] wire [1:0] _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_size; // @[LazyScope.scala:98:27] wire [10:0] _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_source; // @[LazyScope.scala:98:27] wire [12:0] _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_address; // @[LazyScope.scala:98:27] wire [7:0] _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask; // @[LazyScope.scala:98:27] wire [63:0] _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data; // @[LazyScope.scala:98:27] wire _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_corrupt; // @[LazyScope.scala:98:27] wire _coupler_to_bootaddressreg_auto_fragmenter_anon_out_d_ready; // @[LazyScope.scala:98:27] wire _coupler_to_bootaddressreg_auto_tl_in_a_ready; // @[LazyScope.scala:98:27] wire _coupler_to_bootaddressreg_auto_tl_in_d_valid; // @[LazyScope.scala:98:27] wire [2:0] _coupler_to_bootaddressreg_auto_tl_in_d_bits_opcode; // 
@[LazyScope.scala:98:27] wire [2:0] _coupler_to_bootaddressreg_auto_tl_in_d_bits_size; // @[LazyScope.scala:98:27] wire [6:0] _coupler_to_bootaddressreg_auto_tl_in_d_bits_source; // @[LazyScope.scala:98:27] wire [63:0] _coupler_to_bootaddressreg_auto_tl_in_d_bits_data; // @[LazyScope.scala:98:27] wire _buffer_1_auto_out_a_valid; // @[Buffer.scala:75:28] wire [2:0] _buffer_1_auto_out_a_bits_opcode; // @[Buffer.scala:75:28] wire [2:0] _buffer_1_auto_out_a_bits_param; // @[Buffer.scala:75:28] wire [2:0] _buffer_1_auto_out_a_bits_size; // @[Buffer.scala:75:28] wire [6:0] _buffer_1_auto_out_a_bits_source; // @[Buffer.scala:75:28] wire [28:0] _buffer_1_auto_out_a_bits_address; // @[Buffer.scala:75:28] wire [7:0] _buffer_1_auto_out_a_bits_mask; // @[Buffer.scala:75:28] wire [63:0] _buffer_1_auto_out_a_bits_data; // @[Buffer.scala:75:28] wire _buffer_1_auto_out_a_bits_corrupt; // @[Buffer.scala:75:28] wire _buffer_1_auto_out_d_ready; // @[Buffer.scala:75:28] wire _atomics_auto_in_a_ready; // @[AtomicAutomata.scala:289:29] wire _atomics_auto_in_d_valid; // @[AtomicAutomata.scala:289:29] wire [2:0] _atomics_auto_in_d_bits_opcode; // @[AtomicAutomata.scala:289:29] wire [1:0] _atomics_auto_in_d_bits_param; // @[AtomicAutomata.scala:289:29] wire [2:0] _atomics_auto_in_d_bits_size; // @[AtomicAutomata.scala:289:29] wire [6:0] _atomics_auto_in_d_bits_source; // @[AtomicAutomata.scala:289:29] wire _atomics_auto_in_d_bits_sink; // @[AtomicAutomata.scala:289:29] wire _atomics_auto_in_d_bits_denied; // @[AtomicAutomata.scala:289:29] wire [63:0] _atomics_auto_in_d_bits_data; // @[AtomicAutomata.scala:289:29] wire _atomics_auto_in_d_bits_corrupt; // @[AtomicAutomata.scala:289:29] wire _atomics_auto_out_a_valid; // @[AtomicAutomata.scala:289:29] wire [2:0] _atomics_auto_out_a_bits_opcode; // @[AtomicAutomata.scala:289:29] wire [2:0] _atomics_auto_out_a_bits_param; // @[AtomicAutomata.scala:289:29] wire [2:0] _atomics_auto_out_a_bits_size; // @[AtomicAutomata.scala:289:29] wire [6:0] _atomics_auto_out_a_bits_source; // @[AtomicAutomata.scala:289:29] wire [28:0] _atomics_auto_out_a_bits_address; // @[AtomicAutomata.scala:289:29] wire [7:0] _atomics_auto_out_a_bits_mask; // @[AtomicAutomata.scala:289:29] wire [63:0] _atomics_auto_out_a_bits_data; // @[AtomicAutomata.scala:289:29] wire _atomics_auto_out_a_bits_corrupt; // @[AtomicAutomata.scala:289:29] wire _atomics_auto_out_d_ready; // @[AtomicAutomata.scala:289:29] wire _buffer_auto_in_a_ready; // @[Buffer.scala:75:28] wire _buffer_auto_in_d_valid; // @[Buffer.scala:75:28] wire [2:0] _buffer_auto_in_d_bits_opcode; // @[Buffer.scala:75:28] wire [1:0] _buffer_auto_in_d_bits_param; // @[Buffer.scala:75:28] wire [2:0] _buffer_auto_in_d_bits_size; // @[Buffer.scala:75:28] wire [6:0] _buffer_auto_in_d_bits_source; // @[Buffer.scala:75:28] wire _buffer_auto_in_d_bits_sink; // @[Buffer.scala:75:28] wire _buffer_auto_in_d_bits_denied; // @[Buffer.scala:75:28] wire [63:0] _buffer_auto_in_d_bits_data; // @[Buffer.scala:75:28] wire _buffer_auto_in_d_bits_corrupt; // @[Buffer.scala:75:28] wire _buffer_auto_out_a_valid; // @[Buffer.scala:75:28] wire [2:0] _buffer_auto_out_a_bits_opcode; // @[Buffer.scala:75:28] wire [2:0] _buffer_auto_out_a_bits_param; // @[Buffer.scala:75:28] wire [2:0] _buffer_auto_out_a_bits_size; // @[Buffer.scala:75:28] wire [6:0] _buffer_auto_out_a_bits_source; // @[Buffer.scala:75:28] wire [28:0] _buffer_auto_out_a_bits_address; // @[Buffer.scala:75:28] wire [7:0] _buffer_auto_out_a_bits_mask; // @[Buffer.scala:75:28] wire [63:0] 
_buffer_auto_out_a_bits_data; // @[Buffer.scala:75:28] wire _buffer_auto_out_a_bits_corrupt; // @[Buffer.scala:75:28] wire _buffer_auto_out_d_ready; // @[Buffer.scala:75:28] wire _out_xbar_auto_anon_in_a_ready; // @[PeripheryBus.scala:57:30] wire _out_xbar_auto_anon_in_d_valid; // @[PeripheryBus.scala:57:30] wire [2:0] _out_xbar_auto_anon_in_d_bits_opcode; // @[PeripheryBus.scala:57:30] wire [2:0] _out_xbar_auto_anon_in_d_bits_size; // @[PeripheryBus.scala:57:30] wire [6:0] _out_xbar_auto_anon_in_d_bits_source; // @[PeripheryBus.scala:57:30] wire [63:0] _out_xbar_auto_anon_in_d_bits_data; // @[PeripheryBus.scala:57:30] wire _out_xbar_auto_anon_out_1_a_valid; // @[PeripheryBus.scala:57:30] wire [2:0] _out_xbar_auto_anon_out_1_a_bits_opcode; // @[PeripheryBus.scala:57:30] wire [2:0] _out_xbar_auto_anon_out_1_a_bits_param; // @[PeripheryBus.scala:57:30] wire [2:0] _out_xbar_auto_anon_out_1_a_bits_size; // @[PeripheryBus.scala:57:30] wire [6:0] _out_xbar_auto_anon_out_1_a_bits_source; // @[PeripheryBus.scala:57:30] wire [28:0] _out_xbar_auto_anon_out_1_a_bits_address; // @[PeripheryBus.scala:57:30] wire [7:0] _out_xbar_auto_anon_out_1_a_bits_mask; // @[PeripheryBus.scala:57:30] wire [63:0] _out_xbar_auto_anon_out_1_a_bits_data; // @[PeripheryBus.scala:57:30] wire _out_xbar_auto_anon_out_1_a_bits_corrupt; // @[PeripheryBus.scala:57:30] wire _out_xbar_auto_anon_out_1_d_ready; // @[PeripheryBus.scala:57:30] wire _out_xbar_auto_anon_out_0_a_valid; // @[PeripheryBus.scala:57:30] wire [2:0] _out_xbar_auto_anon_out_0_a_bits_opcode; // @[PeripheryBus.scala:57:30] wire [2:0] _out_xbar_auto_anon_out_0_a_bits_param; // @[PeripheryBus.scala:57:30] wire [2:0] _out_xbar_auto_anon_out_0_a_bits_size; // @[PeripheryBus.scala:57:30] wire [6:0] _out_xbar_auto_anon_out_0_a_bits_source; // @[PeripheryBus.scala:57:30] wire [12:0] _out_xbar_auto_anon_out_0_a_bits_address; // @[PeripheryBus.scala:57:30] wire [7:0] _out_xbar_auto_anon_out_0_a_bits_mask; // @[PeripheryBus.scala:57:30] wire [63:0] _out_xbar_auto_anon_out_0_a_bits_data; // @[PeripheryBus.scala:57:30] wire _out_xbar_auto_anon_out_0_a_bits_corrupt; // @[PeripheryBus.scala:57:30] wire _out_xbar_auto_anon_out_0_d_ready; // @[PeripheryBus.scala:57:30] wire _fixedClockNode_auto_anon_out_0_clock; // @[ClockGroup.scala:115:114] wire _fixedClockNode_auto_anon_out_0_reset; // @[ClockGroup.scala:115:114] reg [63:0] pad; // @[BootAddrReg.scala:27:34] wire in_bits_read = _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_opcode == 3'h4; // @[RegisterRouter.scala:74:36] wire _out_T_1 = _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_address[11:3] == 9'h0; // @[RegisterRouter.scala:75:19, :87:24] wire valids_0 = out_woready_7 & _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask[0]; // @[RegisterRouter.scala:87:24] wire valids_1 = out_woready_7 & _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask[1]; // @[RegisterRouter.scala:87:24] wire valids_2 = out_woready_7 & _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask[2]; // @[RegisterRouter.scala:87:24] wire valids_3 = out_woready_7 & _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask[3]; // @[RegisterRouter.scala:87:24] wire valids_4 = out_woready_7 & _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask[4]; // @[RegisterRouter.scala:87:24] wire valids_5 = out_woready_7 & _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask[5]; // @[RegisterRouter.scala:87:24] wire valids_6 = out_woready_7 & 
_coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask[6]; // @[RegisterRouter.scala:87:24] wire valids_7 = out_woready_7 & _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_mask[7]; // @[RegisterRouter.scala:87:24] assign out_woready_7 = _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_valid & _coupler_to_bootaddressreg_auto_fragmenter_anon_out_d_ready & ~in_bits_read & _out_T_1; // @[RegisterRouter.scala:74:36, :87:24] wire [2:0] nodeIn_d_bits_opcode = {2'h0, in_bits_read}; // @[RegisterRouter.scala:74:36, :105:19] always @(posedge _fixedClockNode_auto_anon_out_0_clock) begin // @[ClockGroup.scala:115:114] if (_fixedClockNode_auto_anon_out_0_reset) // @[ClockGroup.scala:115:114] pad <= 64'h80000000; // @[BootAddrReg.scala:27:34] else if (valids_0 | valids_1 | valids_2 | valids_3 | valids_4 | valids_5 | valids_6 | valids_7) // @[RegisterRouter.scala:87:24] pad <= {valids_7 ? _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data[63:56] : pad[63:56], valids_6 ? _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data[55:48] : pad[55:48], valids_5 ? _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data[47:40] : pad[47:40], valids_4 ? _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data[39:32] : pad[39:32], valids_3 ? _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data[31:24] : pad[31:24], valids_2 ? _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data[23:16] : pad[23:16], valids_1 ? _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data[15:8] : pad[15:8], valids_0 ? _coupler_to_bootaddressreg_auto_fragmenter_anon_out_a_bits_data[7:0] : pad[7:0]}; // @[BootAddrReg.scala:27:34] always @(posedge)
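In the (truncated) PeripheryBus_pbus module above, the `pad` register and the `valids_0` .. `valids_7` byte-lane enables are the elaboration of `RegField.bytes(bootAddrReg)` from BootAddrReg.scala: each byte of the 64-bit boot-address register is written only when the corresponding TileLink write-mask bit is set. A hedged Chisel-level rendering of that update follows; `wen`, `wmask`, and `wdata` are illustrative stand-ins for the register-router handshake, not signals from the generated module.

// Sketch only: byte-masked update equivalent to what RegField.bytes produces.
// Assumed signals: wen: Bool, wmask: UInt(8.W), wdata: UInt(64.W).
val cur = bootAddrReg.asTypeOf(Vec(8, UInt(8.W)))         // element 0 = bits 7:0
val nxt = VecInit.tabulate(8) { i =>
  Mux(wen && wmask(i), wdata(8 * i + 7, 8 * i), cur(i))   // keep the byte unless its lane is enabled
}
when (wen && wmask.orR) { bootAddrReg := nxt.asUInt }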
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
module MacUnit_176( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [7:0] io_in_a, // @[PE.scala:16:14] input [7:0] io_in_b, // @[PE.scala:16:14] input [19:0] io_in_c, // @[PE.scala:16:14] output [19:0] io_out_d // @[PE.scala:16:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7] wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7] wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7] wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54] wire [19:0] io_out_d_0; // @[PE.scala:14:7] wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7] wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7] wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54] assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54] assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7] assign io_out_d = io_out_d_0; // @[PE.scala:14:7] endmodule
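The MacUnit Verilog above presumably corresponds to the SInt multiply-accumulate instance of the Arithmetic typeclass (that part of Arithmetic.scala is not shown here): an 8-bit signed multiply whose product is sign-extended, added to the 20-bit accumulator input, and truncated back to 20 bits. A minimal Chisel sketch of that datapath, with module and signal names chosen only for illustration rather than taken from the gemmini sources:

import chisel3._

class MacSketch extends Module {
  val io = IO(new Bundle {
    val in_a  = Input(SInt(8.W))
    val in_b  = Input(SInt(8.W))
    val in_c  = Input(SInt(20.W))
    val out_d = Output(SInt(20.W))
  })
  // a * b widens to 16 bits and +& widens the sum by one more bit; slicing bits [19:0]
  // mirrors the `_io_out_d_T_1[19:0]` truncation visible in the generated Verilog.
  io.out_d := ((io.in_a * io.in_b) +& io.in_c)(19, 0).asSInt
}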
Generate the Verilog code corresponding to the following Chisel files. File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
module RoundAnyRawFNToRecFN_ie11_is55_oe11_os53_2( // @[RoundAnyRawFNToRecFN.scala:48:5] input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_infiniteExc, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16] input [12:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16] input [55:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16] input [2:0] io_roundingMode, // @[RoundAnyRawFNToRecFN.scala:58:16] output [64:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16] ); wire roundingMode_near_even = io_roundingMode == 3'h0; // @[RoundAnyRawFNToRecFN.scala:90:53] wire roundingMode_odd = io_roundingMode == 3'h6; // @[RoundAnyRawFNToRecFN.scala:95:53] wire roundMagUp = io_roundingMode == 3'h2 & io_in_sign | io_roundingMode == 3'h3 & ~io_in_sign; // @[RoundAnyRawFNToRecFN.scala:92:53, :93:53, :98:{27,42,63,66}] wire [11:0] _roundMask_T_1 = ~(io_in_sExp[11:0]); // @[primitives.scala:52:21] wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> _roundMask_T_1[5:0]); // @[primitives.scala:52:21, :59:26, :76:56] wire [18:0] _GEN = {roundMask_shift[18:17], roundMask_shift[20:19], roundMask_shift[22:21], roundMask_shift[24:23], roundMask_shift[26:25], roundMask_shift[28:27], roundMask_shift[30:29], roundMask_shift[32:31], roundMask_shift[34:33], roundMask_shift[36]} & 19'h55555; // @[primitives.scala:76:56, :77:20, :78:22] wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> _roundMask_T_1[5:0]); // @[primitives.scala:52:21, :59:26, :76:56] wire [53:0] _roundMask_T_129 = _roundMask_T_1[11] ? (_roundMask_T_1[10] ? {~(_roundMask_T_1[9] | _roundMask_T_1[8] | _roundMask_T_1[7] | _roundMask_T_1[6] ? 51'h0 : ~{roundMask_shift[13], roundMask_shift[14], roundMask_shift[15], roundMask_shift[16], roundMask_shift[17], _GEN[18:15] | {roundMask_shift[20:19], roundMask_shift[22:21]} & 4'h5, roundMask_shift[22], _GEN[13] | roundMask_shift[23], roundMask_shift[24], roundMask_shift[25], _GEN[10:7] | {roundMask_shift[28:27], roundMask_shift[30:29]} & 4'h5, roundMask_shift[30], _GEN[5] | roundMask_shift[31], roundMask_shift[32], roundMask_shift[33], {_GEN[2:0], 1'h0} | {roundMask_shift[36:35], roundMask_shift[38:37]} & 4'h5, roundMask_shift[38], roundMask_shift[39], roundMask_shift[40], roundMask_shift[41], roundMask_shift[42], roundMask_shift[43], roundMask_shift[44], roundMask_shift[45], roundMask_shift[46], roundMask_shift[47], roundMask_shift[48], roundMask_shift[49], roundMask_shift[50], roundMask_shift[51], roundMask_shift[52], roundMask_shift[53], roundMask_shift[54], roundMask_shift[55], roundMask_shift[56], roundMask_shift[57], roundMask_shift[58], roundMask_shift[59], roundMask_shift[60], roundMask_shift[61], roundMask_shift[62], roundMask_shift[63]}), 3'h7} : {51'h0, _roundMask_T_1[9] & _roundMask_T_1[8] & _roundMask_T_1[7] & _roundMask_T_1[6] ? 
{roundMask_shift_1[0], roundMask_shift_1[1], roundMask_shift_1[2]} : 3'h0}) : 54'h0; // @[primitives.scala:52:21, :58:25, :59:26, :62:24, :67:24, :68:58, :73:{17,21,32}, :76:56, :77:20, :78:22] wire [54:0] _GEN_0 = {1'h1, ~_roundMask_T_129}; // @[primitives.scala:62:24] wire [54:0] _GEN_1 = {_roundMask_T_129, 1'h1}; // @[primitives.scala:62:24] wire [54:0] _roundPosBit_T = io_in_sig[55:1] & _GEN_0 & _GEN_1; // @[RoundAnyRawFNToRecFN.scala:58:16, :159:42, :162:53, :163:46, :164:40] wire [54:0] _anyRoundExtra_T = io_in_sig[54:0] & _GEN_1; // @[RoundAnyRawFNToRecFN.scala:58:16, :159:42, :165:42] wire [109:0] _GEN_2 = {_roundPosBit_T, _anyRoundExtra_T}; // @[RoundAnyRawFNToRecFN.scala:163:46, :164:{40,56}, :165:{42,62}, :166:36] wire _overflow_roundMagUp_T = roundingMode_near_even | io_roundingMode == 3'h4; // @[RoundAnyRawFNToRecFN.scala:90:53, :94:53, :169:38] wire [54:0] roundedSig = _overflow_roundMagUp_T & (|_roundPosBit_T) | roundMagUp & (|_GEN_2) ? {1'h0, io_in_sig[55:2] | _roundMask_T_129} + 55'h1 & ~(roundingMode_near_even & (|_roundPosBit_T) & _anyRoundExtra_T == 55'h0 ? {_roundMask_T_129, 1'h1} : 55'h0) : {1'h0, io_in_sig[55:2] & ~_roundMask_T_129} | (roundingMode_odd & (|_GEN_2) ? _GEN_0 & _GEN_1 : 55'h0); // @[primitives.scala:62:24] wire [13:0] sRoundedExp = {io_in_sExp[12], io_in_sExp} + {12'h0, roundedSig[54:53]}; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:{40,54}] wire common_totalUnderflow = $signed(sRoundedExp) < 14'sh3CE; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31] wire isNaNOut = io_invalidExc | io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:235:34] wire notNaN_isSpecialInfOut = io_infiniteExc | io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:236:49] wire commonCase = ~isNaNOut & ~notNaN_isSpecialInfOut & ~io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:235:34, :236:49, :237:{22,33,36,61,64}] wire overflow = commonCase & $signed(sRoundedExp[13:10]) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:{30,50}, :237:{33,61}, :238:32] wire overflow_roundMagUp = _overflow_roundMagUp_T | roundMagUp; // @[RoundAnyRawFNToRecFN.scala:98:42, :169:38, :243:60] wire pegMinNonzeroMagOut = commonCase & common_totalUnderflow & (roundMagUp | roundingMode_odd); // @[RoundAnyRawFNToRecFN.scala:95:53, :98:42, :200:31, :237:{33,61}, :245:{20,45,60}] wire pegMaxFiniteMagOut = overflow & ~overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:238:32, :243:60, :246:{39,42}] wire notNaN_isInfOut = notNaN_isSpecialInfOut | overflow & overflow_roundMagUp; // @[RoundAnyRawFNToRecFN.scala:236:49, :238:32, :243:60, :248:{32,45}] assign io_out = {~isNaNOut & io_in_sign, sRoundedExp[11:0] & ~(io_in_isZero | common_totalUnderflow ? 12'hE00 : 12'h0) & ~(pegMinNonzeroMagOut ? 12'hC31 : 12'h0) & {1'h1, ~pegMaxFiniteMagOut, 10'h3FF} & {2'h3, ~notNaN_isInfOut, 9'h1FF} | (pegMinNonzeroMagOut ? 12'h3CE : 12'h0) | (pegMaxFiniteMagOut ? 12'hBFF : 12'h0) | (notNaN_isInfOut ? 12'hC00 : 12'h0) | (isNaNOut ? 12'hE00 : 12'h0), (isNaNOut | io_in_isZero | common_totalUnderflow ? 
{isNaNOut, 51'h0} : roundedSig[51:0]) | {52{pegMaxFiniteMagOut}}}; // @[RoundAnyRawFNToRecFN.scala:48:5, :173:16, :185:40, :187:37, :191:27, :200:31, :235:34, :245:{20,45}, :246:39, :248:32, :250:22, :252:24, :253:{14,18,32}, :256:17, :257:{14,18}, :260:17, :261:{14,18}, :264:17, :265:{14,18}, :268:18, :269:16, :272:15, :273:16, :276:15, :277:{16,73}, :278:16, :280:{12,22,38}, :281:16, :283:11, :284:13, :286:33] assign io_exceptionFlags = {io_invalidExc, io_infiniteExc, overflow, commonCase & (common_totalUnderflow | (|_GEN_2) & io_in_sExp[12:11] != 2'h1 & _roundMask_T_129[0] & ~(~(_roundMask_T_129[1]) & roundedSig[53] & (|_roundPosBit_T) & (_overflow_roundMagUp_T & io_in_sig[1] | roundMagUp & (|(io_in_sig[1:0]))))), overflow | commonCase & (|{common_totalUnderflow, _roundPosBit_T, _anyRoundExtra_T})}; // @[primitives.scala:62:24] endmodule
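The heart of RoundAnyRawFNToRecFN is the round-increment decision built from roundPosBit (the first discarded bit), anyRoundExtra (the OR of everything below it) and the rounding mode. Restated on a plain integer significand for the round-to-nearest-even case -- an illustrative software sketch only; the hardware works through `roundMask`, so the cut point varies per input, and `keepBits` below is a parameter assumed for this sketch:

def roundNearEvenSketch(sig: BigInt, keepBits: Int): BigInt = {
  require(keepBits >= 1)
  val kept      = sig >> keepBits                                    // bits that survive rounding
  val roundPos  = (sig >> (keepBits - 1)) & 1                        // roundPosBit: first discarded bit
  val sticky    = (sig & ((BigInt(1) << (keepBits - 1)) - 1)) != 0   // anyRoundExtra: any lower bit set
  val roundIncr = roundPos == 1 && (sticky || (kept & 1) == 1)       // ties break toward the even result
  if (roundIncr) kept + 1 else kept
}
// Example: dropping the low 2 bits of 0b1011 (2.75 in quarters) rounds up to 3,
// while 0b1010 (2.5) is a tie and stays at the even value 2.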
Generate the Verilog code corresponding to the following Chisel files. File Transposer.scala: package gemmini import chisel3._ import chisel3.util._ import Util._ trait Transposer[T <: Data] extends Module { def dim: Int def dataType: T val io = IO(new Bundle { val inRow = Flipped(Decoupled(Vec(dim, dataType))) val outCol = Decoupled(Vec(dim, dataType)) }) } class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose val sMoveUp :: sMoveLeft :: Nil = Enum(2) val state = RegInit(sMoveUp) val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1) val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1) io.outCol.valid := 0.U io.inRow.ready := 0.U switch(state) { is(sMoveUp) { io.inRow.ready := upCounter <= dim.U io.outCol.valid := leftCounter > 0.U when(io.inRow.fire) { upCounter := upCounter + 1.U } when(upCounter === (dim-1).U) { state := sMoveLeft leftCounter := 0.U } when(io.outCol.fire) { leftCounter := leftCounter - 1.U } } is(sMoveLeft) { io.inRow.ready := leftCounter <= dim.U // TODO: this is naive io.outCol.valid := upCounter > 0.U when(leftCounter === (dim-1).U) { state := sMoveUp } when(io.inRow.fire) { leftCounter := leftCounter + 1.U upCounter := 0.U } when(io.outCol.fire) { upCounter := upCounter - 1.U } } } // Propagate input from bottom row to top row systolically in the move up phase // TODO: need to iterate over columns to connect Chisel values of type T // Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?) for (colIdx <- 0 until dim) { regArray.foldRight(io.inRow.bits(colIdx)) { case (regRow, prevReg) => when (state === sMoveUp) { regRow(colIdx) := prevReg } regRow(colIdx) } } // Propagate input from right side to left side systolically in the move left phase for (rowIdx <- 0 until dim) { regArrayT.foldRight(io.inRow.bits(rowIdx)) { case (regCol, prevReg) => when (state === sMoveLeft) { regCol(rowIdx) := prevReg } regCol(rowIdx) } } // Pull from the left side or the top side based on the state for (idx <- 0 until dim) { when (state === sMoveUp) { io.outCol.bits(idx) := regArray(0)(idx) }.elsewhen(state === sMoveLeft) { io.outCol.bits(idx) := regArrayT(0)(idx) }.otherwise { io.outCol.bits(idx) := DontCare } } } class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val LEFT_DIR = 0.U(1.W) val UP_DIR = 1.U(1.W) class PE extends Module { val io = IO(new Bundle { val inR = Input(dataType) val inD = Input(dataType) val outL = Output(dataType) val outU = Output(dataType) val dir = Input(UInt(1.W)) val en = Input(Bool()) }) val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) io.outU := reg io.outL := reg } val pes = Seq.fill(dim,dim)(Module(new PE)) val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter val dir = RegInit(LEFT_DIR) // Wire up horizontal signals for (row <- 0 until dim; col <- 0 until dim) { val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL pes(row)(col).io.inR := right_in } // Wire up vertical signals for (row <- 0 until dim; col <- 0 until dim) { val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU pes(row)(col).io.inD := down_in } // Wire up global signals pes.flatten.foreach(_.io.dir := dir) pes.flatten.foreach(_.io.en := 
io.inRow.fire) io.outCol.valid := true.B io.inRow.ready := true.B val left_out = VecInit(pes.transpose.head.map(_.io.outL)) val up_out = VecInit(pes.head.map(_.io.outU)) io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out) when (io.inRow.fire) { counter := wrappingAdd(counter, 1.U, dim) } when (counter === (dim-1).U && io.inRow.fire) { dir := ~dir } } class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose // state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise val state = RegInit(0.U(1.W)) val countInc = io.inRow.fire || io.outCol.fire val (countValue, countWrap) = Counter(countInc, dim) io.inRow.ready := state === 0.U io.outCol.valid := state === 1.U for (i <- 0 until dim) { for (j <- 0 until dim) { when(countValue === i.U && io.inRow.fire) { regArray(i)(j) := io.inRow.bits(j) } } } for (i <- 0 until dim) { io.outCol.bits(i) := 0.U for (j <- 0 until dim) { when(countValue === j.U) { io.outCol.bits(i) := regArrayT(j)(i) } } } when (io.inRow.fire && countWrap) { state := 1.U } when (io.outCol.fire && countWrap) { state := 0.U } assert(!(state === 0.U) || !io.outCol.fire) assert(!(state === 1.U) || !io.inRow.fire) }
module PE_138( // @[Transposer.scala:100:9] input clock, // @[Transposer.scala:100:9] input reset, // @[Transposer.scala:100:9] input [7:0] io_inR, // @[Transposer.scala:101:16] input [7:0] io_inD, // @[Transposer.scala:101:16] output [7:0] io_outL, // @[Transposer.scala:101:16] output [7:0] io_outU, // @[Transposer.scala:101:16] input io_dir, // @[Transposer.scala:101:16] input io_en // @[Transposer.scala:101:16] ); wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9] wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9] wire io_dir_0 = io_dir; // @[Transposer.scala:100:9] wire io_en_0 = io_en; // @[Transposer.scala:100:9] wire [7:0] io_outL_0; // @[Transposer.scala:100:9] wire [7:0] io_outU_0; // @[Transposer.scala:100:9] wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36] wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}] reg [7:0] reg_0; // @[Transposer.scala:110:24] assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24] assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24] always @(posedge clock) begin // @[Transposer.scala:100:9] if (io_en_0) // @[Transposer.scala:100:9] reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}] end // always @(posedge) assign io_outL = io_outL_0; // @[Transposer.scala:100:9] assign io_outU = io_outU_0; // @[Transposer.scala:100:9] endmodule
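AlwaysOutTransposer advances its direction counter with `wrappingAdd` from gemmini's Util object, which is imported but not shown in the prompt. For the way it is used here (increment by 1, wrap at the compile-time constant `dim`), the behaviour it has to provide is just a modular increment -- a hedged sketch of that behaviour, not the gemmini implementation:

import chisel3._

object WrappingIncSketch {
  // Stands in for wrappingAdd(u, 1.U, max) as used above: counts 0, 1, ..., max-1, 0, ...
  def apply(u: UInt, max: Int): UInt =
    Mux(u === (max - 1).U, 0.U, u + 1.U)
}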
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File SynchronizerReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util.{RegEnable, Cat} /** These wrap behavioral * shift and next registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * * These are built up of *ResetSynchronizerPrimitiveShiftReg, * intended to be replaced by the integrator's metastable flops chains or replaced * at this level if they have a multi-bit wide synchronizer primitive. * The different types vary in their reset behavior: * NonSyncResetSynchronizerShiftReg -- Register array which does not have a reset pin * AsyncResetSynchronizerShiftReg -- Asynchronously reset register array, constructed from W instantiations of D deep * 1-bit-wide shift registers. 
* SyncResetSynchronizerShiftReg -- Synchronously reset register array, constructed similarly to AsyncResetSynchronizerShiftReg * * [Inferred]ResetSynchronizerShiftReg -- TBD reset type by chisel3 reset inference. * * ClockCrossingReg -- Not made up of SynchronizerPrimitiveShiftReg. This is for single-deep flops which cross * Clock Domains. */ object SynchronizerResetType extends Enumeration { val NonSync, Inferred, Sync, Async = Value } // Note: this should not be used directly. // Use the companion object to generate this with the correct reset type mixin. private class SynchronizerPrimitiveShiftReg( sync: Int, init: Boolean, resetType: SynchronizerResetType.Value) extends AbstractPipelineReg(1) { val initInt = if (init) 1 else 0 val initPostfix = resetType match { case SynchronizerResetType.NonSync => "" case _ => s"_i${initInt}" } override def desiredName = s"${resetType.toString}ResetSynchronizerPrimitiveShiftReg_d${sync}${initPostfix}" val chain = List.tabulate(sync) { i => val reg = if (resetType == SynchronizerResetType.NonSync) Reg(Bool()) else RegInit(init.B) reg.suggestName(s"sync_$i") } chain.last := io.d.asBool (chain.init zip chain.tail).foreach { case (sink, source) => sink := source } io.q := chain.head.asUInt } private object SynchronizerPrimitiveShiftReg { def apply (in: Bool, sync: Int, init: Boolean, resetType: SynchronizerResetType.Value): Bool = { val gen: () => SynchronizerPrimitiveShiftReg = resetType match { case SynchronizerResetType.NonSync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) case SynchronizerResetType.Async => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireAsyncReset case SynchronizerResetType.Sync => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) with RequireSyncReset case SynchronizerResetType.Inferred => () => new SynchronizerPrimitiveShiftReg(sync, init, resetType) } AbstractPipelineReg(gen(), in) } } // Note: This module may end up with a non-AsyncReset type reset. // But the Primitives within will always have AsyncReset type. class AsyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"AsyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asAsyncReset){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Async) } } io.q := Cat(output.reverse) } object AsyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } // Note: This module may end up with a non-Bool type reset. // But the Primitives within will always have Bool reset type. @deprecated("SyncResetSynchronizerShiftReg is unecessary with Chisel3 inferred resets. 
Use ResetSynchronizerShiftReg which will use the inferred reset type.", "rocket-chip 1.2") class SyncResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SyncResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 withReset(reset.asBool){ SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Sync) } } io.q := Cat(output.reverse) } object SyncResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SyncResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class ResetSynchronizerShiftReg(w: Int = 1, sync: Int, init: Int) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"ResetSynchronizerShiftReg_w${w}_d${sync}_i${init}" val output = Seq.tabulate(w) { i => val initBit = ((init >> i) & 1) > 0 SynchronizerPrimitiveShiftReg(io.d(i), sync, initBit, SynchronizerResetType.Inferred) } io.q := Cat(output.reverse) } object ResetSynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, init: Int, name: Option[String] = None): T = AbstractPipelineReg(new ResetSynchronizerShiftReg(in.getWidth, sync, init), in, name) def apply [T <: Data](in: T, sync: Int, name: Option[String]): T = apply (in, sync, 0, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, 0, None) def apply [T <: Data](in: T, sync: Int, init: T, name: Option[String]): T = apply(in, sync, init.litValue.toInt, name) def apply [T <: Data](in: T, sync: Int, init: T): T = apply (in, sync, init.litValue.toInt, None) } class SynchronizerShiftReg(w: Int = 1, sync: Int = 3) extends AbstractPipelineReg(w) { require(sync > 1, s"Sync must be greater than 1, not ${sync}.") override def desiredName = s"SynchronizerShiftReg_w${w}_d${sync}" val output = Seq.tabulate(w) { i => SynchronizerPrimitiveShiftReg(io.d(i), sync, false, SynchronizerResetType.NonSync) } io.q := Cat(output.reverse) } object SynchronizerShiftReg { def apply [T <: Data](in: T, sync: Int, name: Option[String] = None): T = if (sync == 0) in else AbstractPipelineReg(new SynchronizerShiftReg(in.getWidth, sync), in, name) def apply [T <: Data](in: T, sync: Int): T = apply (in, sync, None) def apply [T <: Data](in: T): T = apply (in, 3, None) } class ClockCrossingReg(w: Int = 1, doInit: Boolean) extends Module { override def desiredName = s"ClockCrossingReg_w${w}" val io = IO(new Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) }) val cdc_reg = if (doInit) RegEnable(io.d, 0.U(w.W), io.en) else RegEnable(io.d, io.en) io.q := cdc_reg } object ClockCrossingReg { def apply [T <: Data](in: T, en: Bool, doInit: Boolean, name: Option[String] = None): T = { val cdc_reg = Module(new ClockCrossingReg(in.getWidth, doInit)) name.foreach{ cdc_reg.suggestName(_) } cdc_reg.io.d := in.asUInt cdc_reg.io.en := en cdc_reg.io.q.asTypeOf(in) } }
module AsyncResetSynchronizerShiftReg_w1_d3_i0_107( // @[SynchronizerReg.scala:80:7] input clock, // @[SynchronizerReg.scala:80:7] input reset, // @[SynchronizerReg.scala:80:7] input io_d, // @[ShiftReg.scala:36:14] output io_q // @[ShiftReg.scala:36:14] ); wire io_d_0 = io_d; // @[SynchronizerReg.scala:80:7] wire _output_T = reset; // @[SynchronizerReg.scala:86:21] wire _output_T_1 = io_d_0; // @[SynchronizerReg.scala:80:7, :87:41] wire output_0; // @[ShiftReg.scala:48:24] wire io_q_0; // @[SynchronizerReg.scala:80:7] assign io_q_0 = output_0; // @[SynchronizerReg.scala:80:7] AsyncResetSynchronizerPrimitiveShiftReg_d3_i0_183 output_chain ( // @[ShiftReg.scala:45:23] .clock (clock), .reset (_output_T), // @[SynchronizerReg.scala:86:21] .io_d (_output_T_1), // @[SynchronizerReg.scala:87:41] .io_q (output_0) ); // @[ShiftReg.scala:45:23] assign io_q = io_q_0; // @[SynchronizerReg.scala:80:7] endmodule
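The module above is what the AsyncResetSynchronizerShiftReg companion object elaborates for a 1-bit, 3-deep, init-0 synchronizer. A minimal usage sketch, assuming a made-up wrapper module and signal names (only the `apply` signature comes from the source shown above):

import chisel3._
import freechips.rocketchip.util.AsyncResetSynchronizerShiftReg

class IrqSyncSketch extends Module {
  val io = IO(new Bundle {
    val async_irq = Input(Bool())   // level signal produced in another clock domain
    val sync_irq  = Output(Bool())  // safe to sample in this module's clock domain
  })
  // Three async-reset flops deep, reset value 0 -- the same configuration as the
  // AsyncResetSynchronizerShiftReg_w1_d3_i0 instance elaborated above.
  io.sync_irq := AsyncResetSynchronizerShiftReg(io.async_irq, sync = 3, init = 0,
    name = Some("irq_sync"))
}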
Generate the Verilog code corresponding to the following Chisel files. File loop.scala: package boom.v4.ifu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import boom.v4.common._ import boom.v4.util.{BoomCoreStringPrefix} import scala.math.min case class BoomLoopPredictorParams( nWays: Int = 4, threshold: Int = 7 ) class LoopBranchPredictorBank(implicit p: Parameters) extends BranchPredictorBank()(p) { val tagSz = 10 override val nSets = 16 class LoopMeta extends Bundle { val s_cnt = UInt(10.W) } class LoopEntry extends Bundle { val tag = UInt(tagSz.W) val conf = UInt(3.W) val age = UInt(3.W) val p_cnt = UInt(10.W) val s_cnt = UInt(10.W) } class LoopBranchPredictorColumn extends Module { val io = IO(new Bundle { val f2_req_valid = Input(Bool()) val f2_req_idx = Input(UInt()) val f3_req_fire = Input(Bool()) val f3_pred_in = Input(Bool()) val f3_pred = Output(Bool()) val f3_meta = Output(new LoopMeta) val update_mispredict = Input(Bool()) val update_repair = Input(Bool()) val update_idx = Input(UInt()) val update_resolve_dir = Input(Bool()) val update_meta = Input(new LoopMeta) }) val doing_reset = RegInit(true.B) val reset_idx = RegInit(0.U(log2Ceil(nSets).W)) reset_idx := reset_idx + doing_reset when (reset_idx === (nSets-1).U) { doing_reset := false.B } val entries = Reg(Vec(nSets, new LoopEntry)) val f2_entry = WireInit(entries(io.f2_req_idx)) when (io.update_repair && io.update_idx === io.f2_req_idx) { f2_entry.s_cnt := io.update_meta.s_cnt } .elsewhen (io.update_mispredict && io.update_idx === io.f2_req_idx) { f2_entry.s_cnt := 0.U } val f3_entry = RegNext(f2_entry) val f3_scnt = Mux(io.update_repair && io.update_idx === RegNext(io.f2_req_idx), io.update_meta.s_cnt, f3_entry.s_cnt) val f3_tag = RegNext(io.f2_req_idx(tagSz+log2Ceil(nSets)-1,log2Ceil(nSets))) io.f3_pred := io.f3_pred_in io.f3_meta.s_cnt := f3_scnt when (f3_entry.tag === f3_tag) { when (f3_scnt === f3_entry.p_cnt && f3_entry.conf === 7.U) { io.f3_pred := !io.f3_pred_in } } val f4_fire = RegNext(io.f3_req_fire) val f4_entry = RegNext(f3_entry) val f4_tag = RegNext(f3_tag) val f4_scnt = RegNext(f3_scnt) val f4_idx = RegNext(RegNext(io.f2_req_idx)) when (f4_fire) { when (f4_entry.tag === f4_tag) { when (f4_scnt === f4_entry.p_cnt && f4_entry.conf === 7.U) { entries(f4_idx).age := 7.U entries(f4_idx).s_cnt := 0.U } .otherwise { entries(f4_idx).s_cnt := f4_scnt + 1.U entries(f4_idx).age := Mux(f4_entry.age === 7.U, 7.U, f4_entry.age + 1.U) } } } val entry = entries(io.update_idx) val tag = io.update_idx(tagSz+log2Ceil(nSets)-1,log2Ceil(nSets)) val tag_match = entry.tag === tag val ctr_match = entry.p_cnt === io.update_meta.s_cnt val wentry = WireInit(entry) when (io.update_mispredict && !doing_reset) { // Learned, tag match -> decrement confidence when (entry.conf === 7.U && tag_match) { wentry.s_cnt := 0.U wentry.conf := entry.conf - 1.U // Learned, no tag match -> do nothing? Don't evict super-confident entries? 
} .elsewhen (entry.conf === 7.U && !tag_match) { // Confident, tag match, ctr_match -> increment confidence, reset counter } .elsewhen (entry.conf =/= 0.U && tag_match && ctr_match) { wentry.conf := entry.conf + 1.U wentry.s_cnt := 0.U // Confident, tag match, no ctr match -> zero confidence, reset counter, set previous counter } .elsewhen (entry.conf =/= 0.U && tag_match && !ctr_match) { wentry.conf := 0.U wentry.s_cnt := 0.U wentry.p_cnt := io.update_meta.s_cnt // Confident, no tag match, age is 0 -> replace this entry with our own, set our age high to avoid ping-pong } .elsewhen (entry.conf =/= 0.U && !tag_match && entry.age === 0.U) { wentry.tag := tag wentry.conf := 1.U wentry.s_cnt := 0.U wentry.p_cnt := io.update_meta.s_cnt // Confident, no tag match, age > 0 -> decrement age } .elsewhen (entry.conf =/= 0.U && !tag_match && entry.age =/= 0.U) { wentry.age := entry.age - 1.U // Unconfident, tag match, ctr match -> increment confidence } .elsewhen (entry.conf === 0.U && tag_match && ctr_match) { wentry.conf := 1.U wentry.age := 7.U wentry.s_cnt := 0.U // Unconfident, tag match, no ctr match -> set previous counter } .elsewhen (entry.conf === 0.U && tag_match && !ctr_match) { wentry.p_cnt := io.update_meta.s_cnt wentry.age := 7.U wentry.s_cnt := 0.U // Unconfident, no tag match -> set previous counter and tag } .elsewhen (entry.conf === 0.U && !tag_match) { wentry.tag := tag wentry.conf := 1.U wentry.age := 7.U wentry.s_cnt := 0.U wentry.p_cnt := io.update_meta.s_cnt } entries(io.update_idx) := wentry } .elsewhen (io.update_repair && !doing_reset) { when (tag_match && !(f4_fire && io.update_idx === f4_idx)) { wentry.s_cnt := io.update_meta.s_cnt entries(io.update_idx) := wentry } } when (doing_reset) { entries(reset_idx) := (0.U).asTypeOf(new LoopEntry) } } val columns = Seq.fill(bankWidth) { Module(new LoopBranchPredictorColumn) } val mems = Nil // TODO fix val f3_meta = Wire(Vec(bankWidth, new LoopMeta)) override val metaSz = f3_meta.asUInt.getWidth val update_meta = s1_update.bits.meta.asTypeOf(Vec(bankWidth, new LoopMeta)) for (w <- 0 until bankWidth) { columns(w).io.f2_req_valid := s2_valid columns(w).io.f2_req_idx := s2_idx columns(w).io.f3_req_fire := (s3_valid && s3_mask(w) && io.f3_fire && RegNext(io.resp_in(0).f2(w).predicted_pc.valid && io.resp_in(0).f2(w).is_br)) columns(w).io.f3_pred_in := io.resp_in(0).f3(w).taken io.resp.f3(w).taken := columns(w).io.f3_pred columns(w).io.update_mispredict := (s1_update.valid && s1_update.bits.br_mask(w) && s1_update.bits.is_mispredict_update && s1_update.bits.cfi_mispredicted) columns(w).io.update_repair := (s1_update.valid && s1_update.bits.br_mask(w) && s1_update.bits.is_repair_update) columns(w).io.update_idx := s1_update_idx columns(w).io.update_resolve_dir := s1_update.bits.cfi_taken columns(w).io.update_meta := update_meta(w) f3_meta(w) := columns(w).io.f3_meta } io.f3_meta := f3_meta.asUInt }
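Before the generated column below, it may help to restate the per-lookup decision in LoopBranchPredictorColumn as plain Scala: the column only overrides the backing predictor when the stored tag matches, the speculative iteration count has reached the learned trip count, and confidence is saturated at 7. The case class and function names here are illustrative only, not part of the hardware:

final case class LoopEntrySketch(tag: Int, conf: Int, pCnt: Int, sCnt: Int)

def f3PredSketch(entry: LoopEntrySketch, reqTag: Int, basePred: Boolean): Boolean =
  if (entry.tag == reqTag && entry.sCnt == entry.pCnt && entry.conf == 7)
    !basePred      // confident trip-count hit: flip the backing predictor's direction
  else
    basePred       // otherwise pass the backing prediction through unchanged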
module LoopBranchPredictorColumn_3( // @[loop.scala:39:9] input clock, // @[loop.scala:39:9] input reset, // @[loop.scala:39:9] input io_f2_req_valid, // @[loop.scala:43:16] input [35:0] io_f2_req_idx, // @[loop.scala:43:16] input io_f3_req_fire, // @[loop.scala:43:16] input io_f3_pred_in, // @[loop.scala:43:16] output io_f3_pred, // @[loop.scala:43:16] output [9:0] io_f3_meta_s_cnt, // @[loop.scala:43:16] input io_update_mispredict, // @[loop.scala:43:16] input io_update_repair, // @[loop.scala:43:16] input [35:0] io_update_idx, // @[loop.scala:43:16] input io_update_resolve_dir, // @[loop.scala:43:16] input [9:0] io_update_meta_s_cnt // @[loop.scala:43:16] ); wire io_f2_req_valid_0 = io_f2_req_valid; // @[loop.scala:39:9] wire [35:0] io_f2_req_idx_0 = io_f2_req_idx; // @[loop.scala:39:9] wire io_f3_req_fire_0 = io_f3_req_fire; // @[loop.scala:39:9] wire io_f3_pred_in_0 = io_f3_pred_in; // @[loop.scala:39:9] wire io_update_mispredict_0 = io_update_mispredict; // @[loop.scala:39:9] wire io_update_repair_0 = io_update_repair; // @[loop.scala:39:9] wire [35:0] io_update_idx_0 = io_update_idx; // @[loop.scala:39:9] wire io_update_resolve_dir_0 = io_update_resolve_dir; // @[loop.scala:39:9] wire [9:0] io_update_meta_s_cnt_0 = io_update_meta_s_cnt; // @[loop.scala:39:9] wire [2:0] _entries_WIRE_conf = 3'h0; // @[loop.scala:176:43] wire [2:0] _entries_WIRE_age = 3'h0; // @[loop.scala:176:43] wire [9:0] _entries_WIRE_tag = 10'h0; // @[loop.scala:176:43] wire [9:0] _entries_WIRE_p_cnt = 10'h0; // @[loop.scala:176:43] wire [9:0] _entries_WIRE_s_cnt = 10'h0; // @[loop.scala:176:43] wire [35:0] _f2_entry_T = io_f2_req_idx_0; // @[loop.scala:39:9] wire [9:0] f3_scnt; // @[loop.scala:73:23] wire [35:0] _entry_T = io_update_idx_0; // @[loop.scala:39:9] wire [9:0] io_f3_meta_s_cnt_0; // @[loop.scala:39:9] wire io_f3_pred_0; // @[loop.scala:39:9] reg doing_reset; // @[loop.scala:59:30] reg [3:0] reset_idx; // @[loop.scala:60:28] wire [4:0] _reset_idx_T = {1'h0, reset_idx} + {4'h0, doing_reset}; // @[loop.scala:59:30, :60:28, :61:28] wire [3:0] _reset_idx_T_1 = _reset_idx_T[3:0]; // @[loop.scala:61:28] reg [9:0] entries_0_tag; // @[loop.scala:65:22] reg [2:0] entries_0_conf; // @[loop.scala:65:22] reg [2:0] entries_0_age; // @[loop.scala:65:22] reg [9:0] entries_0_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_0_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_1_tag; // @[loop.scala:65:22] reg [2:0] entries_1_conf; // @[loop.scala:65:22] reg [2:0] entries_1_age; // @[loop.scala:65:22] reg [9:0] entries_1_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_1_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_2_tag; // @[loop.scala:65:22] reg [2:0] entries_2_conf; // @[loop.scala:65:22] reg [2:0] entries_2_age; // @[loop.scala:65:22] reg [9:0] entries_2_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_2_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_3_tag; // @[loop.scala:65:22] reg [2:0] entries_3_conf; // @[loop.scala:65:22] reg [2:0] entries_3_age; // @[loop.scala:65:22] reg [9:0] entries_3_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_3_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_4_tag; // @[loop.scala:65:22] reg [2:0] entries_4_conf; // @[loop.scala:65:22] reg [2:0] entries_4_age; // @[loop.scala:65:22] reg [9:0] entries_4_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_4_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_5_tag; // @[loop.scala:65:22] reg [2:0] entries_5_conf; // @[loop.scala:65:22] reg [2:0] entries_5_age; // @[loop.scala:65:22] reg [9:0] entries_5_p_cnt; // @[loop.scala:65:22] reg [9:0] 
entries_5_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_6_tag; // @[loop.scala:65:22] reg [2:0] entries_6_conf; // @[loop.scala:65:22] reg [2:0] entries_6_age; // @[loop.scala:65:22] reg [9:0] entries_6_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_6_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_7_tag; // @[loop.scala:65:22] reg [2:0] entries_7_conf; // @[loop.scala:65:22] reg [2:0] entries_7_age; // @[loop.scala:65:22] reg [9:0] entries_7_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_7_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_8_tag; // @[loop.scala:65:22] reg [2:0] entries_8_conf; // @[loop.scala:65:22] reg [2:0] entries_8_age; // @[loop.scala:65:22] reg [9:0] entries_8_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_8_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_9_tag; // @[loop.scala:65:22] reg [2:0] entries_9_conf; // @[loop.scala:65:22] reg [2:0] entries_9_age; // @[loop.scala:65:22] reg [9:0] entries_9_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_9_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_10_tag; // @[loop.scala:65:22] reg [2:0] entries_10_conf; // @[loop.scala:65:22] reg [2:0] entries_10_age; // @[loop.scala:65:22] reg [9:0] entries_10_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_10_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_11_tag; // @[loop.scala:65:22] reg [2:0] entries_11_conf; // @[loop.scala:65:22] reg [2:0] entries_11_age; // @[loop.scala:65:22] reg [9:0] entries_11_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_11_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_12_tag; // @[loop.scala:65:22] reg [2:0] entries_12_conf; // @[loop.scala:65:22] reg [2:0] entries_12_age; // @[loop.scala:65:22] reg [9:0] entries_12_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_12_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_13_tag; // @[loop.scala:65:22] reg [2:0] entries_13_conf; // @[loop.scala:65:22] reg [2:0] entries_13_age; // @[loop.scala:65:22] reg [9:0] entries_13_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_13_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_14_tag; // @[loop.scala:65:22] reg [2:0] entries_14_conf; // @[loop.scala:65:22] reg [2:0] entries_14_age; // @[loop.scala:65:22] reg [9:0] entries_14_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_14_s_cnt; // @[loop.scala:65:22] reg [9:0] entries_15_tag; // @[loop.scala:65:22] reg [2:0] entries_15_conf; // @[loop.scala:65:22] reg [2:0] entries_15_age; // @[loop.scala:65:22] reg [9:0] entries_15_p_cnt; // @[loop.scala:65:22] reg [9:0] entries_15_s_cnt; // @[loop.scala:65:22] wire [3:0] _f2_entry_T_1 = _f2_entry_T[3:0]; wire [9:0] f2_entry_tag; // @[loop.scala:66:28] wire [2:0] f2_entry_conf; // @[loop.scala:66:28] wire [2:0] f2_entry_age; // @[loop.scala:66:28] wire [9:0] f2_entry_p_cnt; // @[loop.scala:66:28] wire [9:0] f2_entry_s_cnt; // @[loop.scala:66:28] wire [15:0][9:0] _GEN = {{entries_15_tag}, {entries_14_tag}, {entries_13_tag}, {entries_12_tag}, {entries_11_tag}, {entries_10_tag}, {entries_9_tag}, {entries_8_tag}, {entries_7_tag}, {entries_6_tag}, {entries_5_tag}, {entries_4_tag}, {entries_3_tag}, {entries_2_tag}, {entries_1_tag}, {entries_0_tag}}; // @[loop.scala:65:22, :66:28] assign f2_entry_tag = _GEN[_f2_entry_T_1]; // @[loop.scala:66:28] wire [15:0][2:0] _GEN_0 = {{entries_15_conf}, {entries_14_conf}, {entries_13_conf}, {entries_12_conf}, {entries_11_conf}, {entries_10_conf}, {entries_9_conf}, {entries_8_conf}, {entries_7_conf}, {entries_6_conf}, {entries_5_conf}, {entries_4_conf}, {entries_3_conf}, {entries_2_conf}, {entries_1_conf}, {entries_0_conf}}; // 
@[loop.scala:65:22, :66:28] assign f2_entry_conf = _GEN_0[_f2_entry_T_1]; // @[loop.scala:66:28] wire [15:0][2:0] _GEN_1 = {{entries_15_age}, {entries_14_age}, {entries_13_age}, {entries_12_age}, {entries_11_age}, {entries_10_age}, {entries_9_age}, {entries_8_age}, {entries_7_age}, {entries_6_age}, {entries_5_age}, {entries_4_age}, {entries_3_age}, {entries_2_age}, {entries_1_age}, {entries_0_age}}; // @[loop.scala:65:22, :66:28] assign f2_entry_age = _GEN_1[_f2_entry_T_1]; // @[loop.scala:66:28] wire [15:0][9:0] _GEN_2 = {{entries_15_p_cnt}, {entries_14_p_cnt}, {entries_13_p_cnt}, {entries_12_p_cnt}, {entries_11_p_cnt}, {entries_10_p_cnt}, {entries_9_p_cnt}, {entries_8_p_cnt}, {entries_7_p_cnt}, {entries_6_p_cnt}, {entries_5_p_cnt}, {entries_4_p_cnt}, {entries_3_p_cnt}, {entries_2_p_cnt}, {entries_1_p_cnt}, {entries_0_p_cnt}}; // @[loop.scala:65:22, :66:28] assign f2_entry_p_cnt = _GEN_2[_f2_entry_T_1]; // @[loop.scala:66:28] wire [15:0][9:0] _GEN_3 = {{entries_15_s_cnt}, {entries_14_s_cnt}, {entries_13_s_cnt}, {entries_12_s_cnt}, {entries_11_s_cnt}, {entries_10_s_cnt}, {entries_9_s_cnt}, {entries_8_s_cnt}, {entries_7_s_cnt}, {entries_6_s_cnt}, {entries_5_s_cnt}, {entries_4_s_cnt}, {entries_3_s_cnt}, {entries_2_s_cnt}, {entries_1_s_cnt}, {entries_0_s_cnt}}; // @[loop.scala:65:22, :66:28] wire _T_3 = io_update_idx_0 == io_f2_req_idx_0; // @[loop.scala:39:9, :67:45] assign f2_entry_s_cnt = io_update_repair_0 & _T_3 ? io_update_meta_s_cnt_0 : io_update_mispredict_0 & _T_3 ? 10'h0 : _GEN_3[_f2_entry_T_1]; // @[loop.scala:39:9, :66:28, :67:{28,45,64}, :68:22, :69:{39,75}, :70:22] reg [9:0] f3_entry_tag; // @[loop.scala:72:27] reg [2:0] f3_entry_conf; // @[loop.scala:72:27] reg [2:0] f3_entry_age; // @[loop.scala:72:27] reg [9:0] f3_entry_p_cnt; // @[loop.scala:72:27] reg [9:0] f3_entry_s_cnt; // @[loop.scala:72:27] reg [35:0] f3_scnt_REG; // @[loop.scala:73:69] wire _f3_scnt_T = io_update_idx_0 == f3_scnt_REG; // @[loop.scala:39:9, :73:{58,69}] wire _f3_scnt_T_1 = io_update_repair_0 & _f3_scnt_T; // @[loop.scala:39:9, :73:{41,58}] assign f3_scnt = _f3_scnt_T_1 ? io_update_meta_s_cnt_0 : f3_entry_s_cnt; // @[loop.scala:39:9, :72:27, :73:{23,41}] assign io_f3_meta_s_cnt_0 = f3_scnt; // @[loop.scala:39:9, :73:23] wire [9:0] _f3_tag_T = io_f2_req_idx_0[13:4]; // @[loop.scala:39:9, :76:41] reg [9:0] f3_tag; // @[loop.scala:76:27] wire _io_f3_pred_T = ~io_f3_pred_in_0; // @[loop.scala:39:9, :83:23] assign io_f3_pred_0 = f3_entry_tag == f3_tag & f3_scnt == f3_entry_p_cnt & (&f3_entry_conf) ? 
_io_f3_pred_T : io_f3_pred_in_0; // @[loop.scala:39:9, :72:27, :73:23, :76:27, :78:16, :81:{24,36}, :82:{21,40,57,66}, :83:{20,23}] reg f4_fire; // @[loop.scala:88:27] reg [9:0] f4_entry_tag; // @[loop.scala:89:27] reg [2:0] f4_entry_conf; // @[loop.scala:89:27] reg [2:0] f4_entry_age; // @[loop.scala:89:27] reg [9:0] f4_entry_p_cnt; // @[loop.scala:89:27] reg [9:0] f4_entry_s_cnt; // @[loop.scala:89:27] reg [9:0] f4_tag; // @[loop.scala:90:27] reg [9:0] f4_scnt; // @[loop.scala:91:27] reg [35:0] f4_idx_REG; // @[loop.scala:92:35] reg [35:0] f4_idx; // @[loop.scala:92:27] wire [10:0] _entries_s_cnt_T = {1'h0, f4_scnt} + 11'h1; // @[loop.scala:91:27, :101:44] wire [9:0] _entries_s_cnt_T_1 = _entries_s_cnt_T[9:0]; // @[loop.scala:101:44] wire _entries_age_T = &f4_entry_age; // @[loop.scala:89:27, :102:53] wire [3:0] _entries_age_T_1 = {1'h0, f4_entry_age} + 4'h1; // @[loop.scala:89:27, :102:80] wire [2:0] _entries_age_T_2 = _entries_age_T_1[2:0]; // @[loop.scala:102:80] wire [2:0] _entries_age_T_3 = _entries_age_T ? 3'h7 : _entries_age_T_2; // @[loop.scala:102:{39,53,80}] wire [3:0] _entry_T_1 = _entry_T[3:0]; wire [9:0] tag = io_update_idx_0[13:4]; // @[loop.scala:39:9, :109:28] wire tag_match = _GEN[_entry_T_1] == tag; // @[loop.scala:66:28, :109:28, :110:31] wire ctr_match = _GEN_2[_entry_T_1] == io_update_meta_s_cnt_0; // @[loop.scala:39:9, :66:28, :110:31, :111:33] wire [9:0] wentry_tag; // @[loop.scala:112:26] wire [2:0] wentry_conf; // @[loop.scala:112:26] wire [2:0] wentry_age; // @[loop.scala:112:26] wire [9:0] wentry_p_cnt; // @[loop.scala:112:26] wire [9:0] wentry_s_cnt; // @[loop.scala:112:26] wire _T_22 = io_update_mispredict_0 & ~doing_reset; // @[loop.scala:39:9, :59:30, :114:{32,35}] wire _T_24 = (&_GEN_0[_entry_T_1]) & tag_match; // @[loop.scala:66:28, :110:31, :117:{24,32}] wire [3:0] _GEN_4 = {1'h0, _GEN_0[_entry_T_1]}; // @[loop.scala:66:28, :110:31, :119:36] wire [3:0] _wentry_conf_T = _GEN_4 - 4'h1; // @[loop.scala:119:36] wire [2:0] _wentry_conf_T_1 = _wentry_conf_T[2:0]; // @[loop.scala:119:36] wire _T_27 = (&_GEN_0[_entry_T_1]) & ~tag_match; // @[loop.scala:66:28, :110:31, :117:24, :122:{39,42}] wire _T_30 = (|_GEN_0[_entry_T_1]) & tag_match & ctr_match; // @[loop.scala:66:28, :110:31, :111:33, :125:{31,39,52}] wire [3:0] _wentry_conf_T_2 = _GEN_4 + 4'h1; // @[loop.scala:102:80, :119:36, :126:36] wire [2:0] _wentry_conf_T_3 = _wentry_conf_T_2[2:0]; // @[loop.scala:126:36] wire _T_34 = (|_GEN_0[_entry_T_1]) & tag_match & ~ctr_match; // @[loop.scala:66:28, :110:31, :111:33, :125:31, :130:{39,52,55}] wire _T_39 = (|_GEN_0[_entry_T_1]) & ~tag_match & _GEN_1[_entry_T_1] == 3'h0; // @[loop.scala:66:28, :110:31, :122:42, :125:31, :136:{39,53,66}] wire _T_44 = (|_GEN_0[_entry_T_1]) & ~tag_match & (|_GEN_1[_entry_T_1]); // @[loop.scala:66:28, :110:31, :122:42, :125:31, :143:{39,53,66}] wire [3:0] _wentry_age_T = {1'h0, _GEN_1[_entry_T_1]} - 4'h1; // @[loop.scala:66:28, :110:31, :144:33] wire [2:0] _wentry_age_T_1 = _wentry_age_T[2:0]; // @[loop.scala:144:33] wire _T_52 = _GEN_0[_entry_T_1] == 3'h0; // @[loop.scala:66:28, :110:31, :147:31] wire _T_47 = _T_52 & tag_match & ctr_match; // @[loop.scala:110:31, :111:33, :147:{31,39,52}] wire _T_51 = _T_52 & tag_match & ~ctr_match; // @[loop.scala:110:31, :111:33, :130:55, :147:31, :153:{39,52}] wire _T_54 = _T_52 & ~tag_match; // @[loop.scala:110:31, :122:42, :147:31, :159:39] wire _GEN_5 = _T_47 | _T_51; // @[loop.scala:112:26, :147:{39,52,66}, :153:{39,52,67}, :159:54] wire _GEN_6 = _T_30 | _T_34; // @[loop.scala:112:26, 
:125:{39,52,66}, :130:{39,52,67}, :136:75] assign wentry_tag = ~_T_22 | _T_24 | _T_27 | _GEN_6 | ~(_T_39 | ~(_T_44 | _GEN_5 | ~_T_54)) ? _GEN[_entry_T_1] : tag; // @[loop.scala:66:28, :109:28, :110:31, :112:26, :114:{32,49}, :117:{32,46}, :122:{39,54}, :125:66, :130:67, :136:{39,53,75}, :137:22, :143:{39,53,75}, :147:66, :153:67, :159:{39,54}] assign wentry_conf = _T_22 ? (_T_24 ? _wentry_conf_T_1 : _T_27 ? _GEN_0[_entry_T_1] : _T_30 ? _wentry_conf_T_3 : _T_34 ? 3'h0 : _T_39 | ~(_T_44 | ~(_T_47 | ~(_T_51 | ~_T_54))) ? 3'h1 : _GEN_0[_entry_T_1]) : _GEN_0[_entry_T_1]; // @[loop.scala:66:28, :110:31, :112:26, :114:{32,49}, :117:{32,46}, :119:{22,36}, :122:{39,54}, :125:{39,52,66}, :126:{22,36}, :130:{39,52,67}, :131:22, :136:{39,53,75}, :138:22, :143:{39,53,75}, :147:{39,52,66}, :148:22, :153:{39,52,67}, :159:{39,54}] wire _GEN_7 = _T_51 | _T_54; // @[loop.scala:112:26, :153:{39,52,67}, :155:22, :159:{39,54}, :162:22] wire _GEN_8 = _T_34 | _T_39; // @[loop.scala:112:26, :130:{39,52,67}, :136:{39,53,75}, :143:75] assign wentry_age = ~_T_22 | _T_24 | _T_27 | _T_30 | _GEN_8 ? _GEN_1[_entry_T_1] : _T_44 ? _wentry_age_T_1 : _T_47 | _GEN_7 ? 3'h7 : _GEN_1[_entry_T_1]; // @[loop.scala:66:28, :110:31, :112:26, :114:{32,49}, :117:{32,46}, :122:{39,54}, :125:{39,52,66}, :130:67, :136:75, :143:{39,53,75}, :144:{20,33}, :147:{39,52,66}, :149:22, :153:67, :155:22, :159:54, :162:22] assign wentry_p_cnt = ~_T_22 | _T_24 | _T_27 | _T_30 | ~(_GEN_8 | ~(_T_44 | _T_47 | ~_GEN_7)) ? _GEN_2[_entry_T_1] : io_update_meta_s_cnt_0; // @[loop.scala:39:9, :66:28, :110:31, :112:26, :114:{32,49}, :117:{32,46}, :122:{39,54}, :125:{39,52,66}, :130:67, :133:22, :136:75, :140:22, :143:{39,53,75}, :147:{39,52,66}, :153:67, :155:22, :159:54, :162:22] wire _T_58 = io_update_repair_0 & ~doing_reset; // @[loop.scala:39:9, :59:30, :114:35, :168:35] wire _T_62 = tag_match & ~(f4_fire & io_update_idx_0 == f4_idx); // @[loop.scala:39:9, :88:27, :92:27, :110:31, :169:{23,26,36,53}] assign wentry_s_cnt = _T_22 ? (_T_24 | ~(_T_27 | ~(_GEN_6 | _T_39 | ~(_T_44 | ~(_GEN_5 | _T_54)))) ? 10'h0 : _GEN_3[_entry_T_1]) : _T_58 & _T_62 ? io_update_meta_s_cnt_0 : _GEN_3[_entry_T_1]; // @[loop.scala:39:9, :66:28, :110:31, :112:26, :114:{32,49}, :117:{32,46}, :118:22, :122:{39,54}, :125:66, :127:22, :130:67, :132:22, :136:{39,53,75}, :139:22, :143:{39,53,75}, :147:66, :150:22, :153:67, :156:22, :159:{39,54}, :163:22, :168:{35,52}, :169:{23,66}, :170:22] wire _T_12 = f4_scnt == f4_entry_p_cnt & (&f4_entry_conf); // @[loop.scala:89:27, :91:27, :97:{23,42,59}] wire _GEN_9 = f4_fire & f4_entry_tag == f4_tag; // @[loop.scala:65:22, :88:27, :89:27, :90:27, :95:20, :96:{26,38}, :97:68] always @(posedge clock) begin // @[loop.scala:39:9] if (reset) begin // @[loop.scala:39:9] doing_reset <= 1'h1; // @[loop.scala:59:30] reset_idx <= 4'h0; // @[loop.scala:60:28] end else begin // @[loop.scala:39:9] doing_reset <= reset_idx != 4'hF & doing_reset; // @[loop.scala:59:30, :60:28, :62:{21,38,52}] reset_idx <= _reset_idx_T_1; // @[loop.scala:60:28, :61:28] end if (doing_reset & reset_idx == 4'h0) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_0_tag <= 10'h0; // @[loop.scala:65:22] entries_0_conf <= 3'h0; // @[loop.scala:65:22] entries_0_age <= 3'h0; // @[loop.scala:65:22] entries_0_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_0_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? 
io_update_idx_0[3:0] == 4'h0 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h0) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_0_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_0_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_0_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_0_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_0_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h0) // @[loop.scala:92:27, :98:33] entries_0_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h0) // @[loop.scala:92:27, :99:33] entries_0_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h0) // @[loop.scala:92:27, :102:33] entries_0_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h0) // @[loop.scala:92:27, :101:33] entries_0_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h1) begin // @[loop.scala:59:30, :60:28, :102:80, :114:49, :175:24, :176:26] entries_1_tag <= 10'h0; // @[loop.scala:65:22] entries_1_conf <= 3'h0; // @[loop.scala:65:22] entries_1_age <= 3'h0; // @[loop.scala:65:22] entries_1_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_1_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? io_update_idx_0[3:0] == 4'h1 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h1) begin // @[loop.scala:39:9, :65:22, :95:20, :102:80, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_1_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_1_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_1_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_1_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_1_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h1) // @[loop.scala:92:27, :98:33, :102:80] entries_1_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h1) // @[loop.scala:92:27, :99:33, :102:80] entries_1_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h1) // @[loop.scala:92:27, :102:{33,80}] entries_1_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h1) // @[loop.scala:92:27, :101:33, :102:80] entries_1_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h2) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_2_tag <= 10'h0; // @[loop.scala:65:22] entries_2_conf <= 3'h0; // @[loop.scala:65:22] entries_2_age <= 3'h0; // @[loop.scala:65:22] entries_2_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_2_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? 
io_update_idx_0[3:0] == 4'h2 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h2) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_2_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_2_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_2_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_2_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_2_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h2) // @[loop.scala:92:27, :98:33] entries_2_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h2) // @[loop.scala:92:27, :99:33] entries_2_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h2) // @[loop.scala:92:27, :102:33] entries_2_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h2) // @[loop.scala:92:27, :101:33] entries_2_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h3) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_3_tag <= 10'h0; // @[loop.scala:65:22] entries_3_conf <= 3'h0; // @[loop.scala:65:22] entries_3_age <= 3'h0; // @[loop.scala:65:22] entries_3_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_3_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? io_update_idx_0[3:0] == 4'h3 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h3) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_3_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_3_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_3_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_3_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_3_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h3) // @[loop.scala:92:27, :98:33] entries_3_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h3) // @[loop.scala:92:27, :99:33] entries_3_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h3) // @[loop.scala:92:27, :102:33] entries_3_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h3) // @[loop.scala:92:27, :101:33] entries_3_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h4) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_4_tag <= 10'h0; // @[loop.scala:65:22] entries_4_conf <= 3'h0; // @[loop.scala:65:22] entries_4_age <= 3'h0; // @[loop.scala:65:22] entries_4_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_4_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? 
io_update_idx_0[3:0] == 4'h4 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h4) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_4_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_4_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_4_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_4_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_4_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h4) // @[loop.scala:92:27, :98:33] entries_4_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h4) // @[loop.scala:92:27, :99:33] entries_4_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h4) // @[loop.scala:92:27, :102:33] entries_4_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h4) // @[loop.scala:92:27, :101:33] entries_4_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h5) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_5_tag <= 10'h0; // @[loop.scala:65:22] entries_5_conf <= 3'h0; // @[loop.scala:65:22] entries_5_age <= 3'h0; // @[loop.scala:65:22] entries_5_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_5_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? io_update_idx_0[3:0] == 4'h5 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h5) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_5_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_5_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_5_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_5_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_5_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h5) // @[loop.scala:92:27, :98:33] entries_5_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h5) // @[loop.scala:92:27, :99:33] entries_5_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h5) // @[loop.scala:92:27, :102:33] entries_5_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h5) // @[loop.scala:92:27, :101:33] entries_5_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h6) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_6_tag <= 10'h0; // @[loop.scala:65:22] entries_6_conf <= 3'h0; // @[loop.scala:65:22] entries_6_age <= 3'h0; // @[loop.scala:65:22] entries_6_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_6_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? 
io_update_idx_0[3:0] == 4'h6 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h6) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_6_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_6_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_6_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_6_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_6_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h6) // @[loop.scala:92:27, :98:33] entries_6_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h6) // @[loop.scala:92:27, :99:33] entries_6_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h6) // @[loop.scala:92:27, :102:33] entries_6_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h6) // @[loop.scala:92:27, :101:33] entries_6_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h7) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_7_tag <= 10'h0; // @[loop.scala:65:22] entries_7_conf <= 3'h0; // @[loop.scala:65:22] entries_7_age <= 3'h0; // @[loop.scala:65:22] entries_7_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_7_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? io_update_idx_0[3:0] == 4'h7 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h7) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_7_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_7_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_7_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_7_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_7_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h7) // @[loop.scala:92:27, :98:33] entries_7_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h7) // @[loop.scala:92:27, :99:33] entries_7_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h7) // @[loop.scala:92:27, :102:33] entries_7_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h7) // @[loop.scala:92:27, :101:33] entries_7_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h8) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_8_tag <= 10'h0; // @[loop.scala:65:22] entries_8_conf <= 3'h0; // @[loop.scala:65:22] entries_8_age <= 3'h0; // @[loop.scala:65:22] entries_8_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_8_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? 
io_update_idx_0[3:0] == 4'h8 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h8) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_8_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_8_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_8_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_8_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_8_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h8) // @[loop.scala:92:27, :98:33] entries_8_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h8) // @[loop.scala:92:27, :99:33] entries_8_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h8) // @[loop.scala:92:27, :102:33] entries_8_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h8) // @[loop.scala:92:27, :101:33] entries_8_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'h9) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_9_tag <= 10'h0; // @[loop.scala:65:22] entries_9_conf <= 3'h0; // @[loop.scala:65:22] entries_9_age <= 3'h0; // @[loop.scala:65:22] entries_9_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_9_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? io_update_idx_0[3:0] == 4'h9 : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'h9) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_9_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_9_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_9_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_9_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_9_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h9) // @[loop.scala:92:27, :98:33] entries_9_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'h9) // @[loop.scala:92:27, :99:33] entries_9_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'h9) // @[loop.scala:92:27, :102:33] entries_9_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'h9) // @[loop.scala:92:27, :101:33] entries_9_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'hA) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_10_tag <= 10'h0; // @[loop.scala:65:22] entries_10_conf <= 3'h0; // @[loop.scala:65:22] entries_10_age <= 3'h0; // @[loop.scala:65:22] entries_10_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_10_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? 
io_update_idx_0[3:0] == 4'hA : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'hA) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_10_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_10_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_10_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_10_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_10_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hA) // @[loop.scala:92:27, :98:33] entries_10_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'hA) // @[loop.scala:92:27, :99:33] entries_10_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hA) // @[loop.scala:92:27, :102:33] entries_10_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'hA) // @[loop.scala:92:27, :101:33] entries_10_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'hB) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_11_tag <= 10'h0; // @[loop.scala:65:22] entries_11_conf <= 3'h0; // @[loop.scala:65:22] entries_11_age <= 3'h0; // @[loop.scala:65:22] entries_11_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_11_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? io_update_idx_0[3:0] == 4'hB : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'hB) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_11_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_11_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_11_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_11_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_11_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hB) // @[loop.scala:92:27, :98:33] entries_11_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'hB) // @[loop.scala:92:27, :99:33] entries_11_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hB) // @[loop.scala:92:27, :102:33] entries_11_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'hB) // @[loop.scala:92:27, :101:33] entries_11_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'hC) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_12_tag <= 10'h0; // @[loop.scala:65:22] entries_12_conf <= 3'h0; // @[loop.scala:65:22] entries_12_age <= 3'h0; // @[loop.scala:65:22] entries_12_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_12_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? 
io_update_idx_0[3:0] == 4'hC : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'hC) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_12_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_12_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_12_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_12_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_12_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hC) // @[loop.scala:92:27, :98:33] entries_12_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'hC) // @[loop.scala:92:27, :99:33] entries_12_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hC) // @[loop.scala:92:27, :102:33] entries_12_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'hC) // @[loop.scala:92:27, :101:33] entries_12_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'hD) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_13_tag <= 10'h0; // @[loop.scala:65:22] entries_13_conf <= 3'h0; // @[loop.scala:65:22] entries_13_age <= 3'h0; // @[loop.scala:65:22] entries_13_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_13_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? io_update_idx_0[3:0] == 4'hD : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'hD) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_13_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_13_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_13_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_13_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_13_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hD) // @[loop.scala:92:27, :98:33] entries_13_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'hD) // @[loop.scala:92:27, :99:33] entries_13_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hD) // @[loop.scala:92:27, :102:33] entries_13_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'hD) // @[loop.scala:92:27, :101:33] entries_13_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & reset_idx == 4'hE) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_14_tag <= 10'h0; // @[loop.scala:65:22] entries_14_conf <= 3'h0; // @[loop.scala:65:22] entries_14_age <= 3'h0; // @[loop.scala:65:22] entries_14_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_14_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? 
io_update_idx_0[3:0] == 4'hE : _T_58 & _T_62 & io_update_idx_0[3:0] == 4'hE) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_14_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_14_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_14_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_14_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_14_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hE) // @[loop.scala:92:27, :98:33] entries_14_age <= 3'h7; // @[loop.scala:65:22] if (f4_idx[3:0] == 4'hE) // @[loop.scala:92:27, :99:33] entries_14_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (f4_idx[3:0] == 4'hE) // @[loop.scala:92:27, :102:33] entries_14_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (f4_idx[3:0] == 4'hE) // @[loop.scala:92:27, :101:33] entries_14_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end if (doing_reset & (&reset_idx)) begin // @[loop.scala:59:30, :60:28, :114:49, :175:24, :176:26] entries_15_tag <= 10'h0; // @[loop.scala:65:22] entries_15_conf <= 3'h0; // @[loop.scala:65:22] entries_15_age <= 3'h0; // @[loop.scala:65:22] entries_15_p_cnt <= 10'h0; // @[loop.scala:65:22] entries_15_s_cnt <= 10'h0; // @[loop.scala:65:22] end else if (_T_22 ? (&(io_update_idx_0[3:0])) : _T_58 & _T_62 & (&(io_update_idx_0[3:0]))) begin // @[loop.scala:39:9, :65:22, :95:20, :114:{32,49}, :167:30, :168:{35,52}, :169:{23,66}, :171:32] entries_15_tag <= wentry_tag; // @[loop.scala:65:22, :112:26] entries_15_conf <= wentry_conf; // @[loop.scala:65:22, :112:26] entries_15_age <= wentry_age; // @[loop.scala:65:22, :112:26] entries_15_p_cnt <= wentry_p_cnt; // @[loop.scala:65:22, :112:26] entries_15_s_cnt <= wentry_s_cnt; // @[loop.scala:65:22, :112:26] end else if (_GEN_9) begin // @[loop.scala:65:22, :95:20, :96:38, :97:68] if (_T_12) begin // @[loop.scala:97:42] if (&(f4_idx[3:0])) // @[loop.scala:92:27, :98:33] entries_15_age <= 3'h7; // @[loop.scala:65:22] if (&(f4_idx[3:0])) // @[loop.scala:92:27, :99:33] entries_15_s_cnt <= 10'h0; // @[loop.scala:65:22] end else begin // @[loop.scala:97:42] if (&(f4_idx[3:0])) // @[loop.scala:92:27, :102:33] entries_15_age <= _entries_age_T_3; // @[loop.scala:65:22, :102:39] if (&(f4_idx[3:0])) // @[loop.scala:92:27, :101:33] entries_15_s_cnt <= _entries_s_cnt_T_1; // @[loop.scala:65:22, :101:44] end end f3_entry_tag <= f2_entry_tag; // @[loop.scala:66:28, :72:27] f3_entry_conf <= f2_entry_conf; // @[loop.scala:66:28, :72:27] f3_entry_age <= f2_entry_age; // @[loop.scala:66:28, :72:27] f3_entry_p_cnt <= f2_entry_p_cnt; // @[loop.scala:66:28, :72:27] f3_entry_s_cnt <= f2_entry_s_cnt; // @[loop.scala:66:28, :72:27] f3_scnt_REG <= io_f2_req_idx_0; // @[loop.scala:39:9, :73:69] f3_tag <= _f3_tag_T; // @[loop.scala:76:{27,41}] f4_fire <= io_f3_req_fire_0; // @[loop.scala:39:9, :88:27] f4_entry_tag <= f3_entry_tag; // @[loop.scala:72:27, :89:27] f4_entry_conf <= f3_entry_conf; // @[loop.scala:72:27, :89:27] f4_entry_age <= f3_entry_age; // @[loop.scala:72:27, :89:27] f4_entry_p_cnt <= f3_entry_p_cnt; // @[loop.scala:72:27, :89:27] f4_entry_s_cnt <= f3_entry_s_cnt; // @[loop.scala:72:27, :89:27] f4_tag <= f3_tag; // @[loop.scala:76:27, :90:27] f4_scnt <= f3_scnt; // @[loop.scala:73:23, :91:27] f4_idx_REG <= io_f2_req_idx_0; // @[loop.scala:39:9, :92:35] f4_idx <= 
    f4_idx_REG; // @[loop.scala:92:{27,35}]
  end // always @(posedge)
  assign io_f3_pred = io_f3_pred_0; // @[loop.scala:39:9]
  assign io_f3_meta_s_cnt = io_f3_meta_s_cnt_0; // @[loop.scala:39:9]
endmodule
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala:
// See LICENSE.SiFive for license details.

package freechips.rocketchip.tilelink

import chisel3._
import chisel3.util._
import chisel3.experimental.SourceLine
import org.chipsalliance.cde.config._
import org.chipsalliance.diplomacy._
import freechips.rocketchip.diplomacy.EnableMonitors
import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode}
import freechips.rocketchip.util.PlusArg

case class TLMonitorArgs(edge: TLEdge)

abstract class TLMonitorBase(args: TLMonitorArgs) extends Module
{
  val io = IO(new Bundle {
    val in = Input(new TLBundle(args.edge.bundle))
  })

  def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit
  legalize(io.in, args.edge, reset)
}

object TLMonitor {
  def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = {
    if (enable) {
      EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) }
    } else { node }
  }
}

class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args)
{
  require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal))

  val cover_prop_class = PropertyClass.Default

  //Like assert but can flip to being an assumption for formal verification
  def monAssert(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir, cond, message, PropertyClass.Default)
    }

  def assume(cond: Bool, message: String): Unit =
    if (monitorDir == MonitorDirection.Monitor) {
      assert(cond, message)
    } else {
      Property(monitorDir.flip, cond, message, PropertyClass.Default)
    }

  def extra = {
    args.edge.sourceInfo match {
      case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)"
      case _ => ""
    }
  }

  def visible(address: UInt, source: UInt, edge: TLEdge) =
    edge.client.clients.map { c =>
      !c.sourceId.contains(source) ||
      c.visibility.map(_.contains(address)).reduce(_ || _)
    }.reduce(_ && _)

  def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = {
    //switch this flag to turn on diplomacy in error messages
    def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n"

    monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra)

    // Reuse these subexpressions to save some firrtl lines
    val source_ok = edge.client.contains(bundle.source)
    val is_aligned = edge.isAligned(bundle.address, bundle.size)
    val mask = edge.full_mask(bundle)

    monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility")

    //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B
    //TODO: check for acquireT?
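    // Descriptive note: each per-opcode block below follows the same pattern — first check
    // the message against the diplomatic parameters (the master must be able to emit it and
    // the slave must be able to accept it at this address and size), then check source ID,
    // alignment, param, mask and corrupt legality for that particular opcode.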
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
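    // inflight, inflight_opcodes and inflight_sizes form a per-source scoreboard: an accepted
    // 'A' request one-hots its source ID into inflight and stores (opcode << 1 | 1) and
    // (size << 1 | 1) into that source's lane of the wide registers, the trailing 1 marking
    // the lane as occupied so a stored value of 0 is distinguishable from "unset". For example,
    // a Get (opcode 4) from source 2 sets inflight(2) and records the opcode in lane 2 of
    // inflight_opcodes until the matching AccessAckData on 'D' fires and d_clr / d_opcodes_clr /
    // d_sizes_clr clear that lane again.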
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { 
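      // Dynamic rotate built from log2(width) fixed-rotate stages: stage i rotates by 2^i
      // only when bit i of the zero-padded amount is set, e.g. n = 5 applies the 1-bit and
      // 4-bit stages.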
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s form high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _ 
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalond diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of 
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permisions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TotT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with 
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // An potentially empty inclusive range of 2-powers 
[min, max] (in bytes)
case class TransferSizes(min: Int, max: Int)
{
  def this(x: Int) = this(x, x)

  require (min <= max, s"Min transfer $min > max transfer $max")
  require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)")
  require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max")
  require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min")
  require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)")

  def none = min == 0
  def contains(x: Int) = isPow2(x) && min <= x && x <= max
  def containsLg(x: Int) = contains(1 << x)
  def containsLg(x: UInt) =
    if (none) false.B
    else if (min == max) { log2Ceil(min).U === x }
    else { log2Ceil(min).U <= x && x <= log2Ceil(max).U }

  def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max)

  def intersect(x: TransferSizes) =
    if (x.max < min || max < x.min) TransferSizes.none
    else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max))

  // Not a union, because the result may contain sizes contained by neither term
  // NOT TO BE CONFUSED WITH COVERPOINTS
  def mincover(x: TransferSizes) = {
    if (none) {
      x
    } else if (x.none) {
      this
    } else {
      TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max))
    }
  }

  override def toString() = "TransferSizes[%d, %d]".format(min, max)
}

object TransferSizes {
  def apply(x: Int) = new TransferSizes(x)
  val none = new TransferSizes(0)

  def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _)
  def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _)

  implicit def asBool(x: TransferSizes) = !x.none
}

// AddressSets specify the address space managed by the manager
// Base is the base address, and mask are the bits consumed by the manager
// e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff
// e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
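// e.g: with base=0x1000, mask=0xf0f: contains(0x1108) holds because (0x1108 ^ 0x1000) & ~0xf0f == 0,
//      contains(0x1010) does not (bit 4 differs and is outside the mask), and
//      alignment = ((mask + 1) & ~mask) = 0x10, i.e. each fragment is 16 bytes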
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
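      // Illustrative note: the one-bit tests in hasData() above rely on the TLMessages opcode
      // encoding. On A/B the data-carrying opcodes are 0..3 (PutFullData=0, PutPartialData=1,
      // ArithmeticData=2, LogicalData=3), so !opcode(2) is sufficient; on C/D the data-carrying
      // opcodes are odd (AccessAckData=1, ProbeAckData=5, ReleaseData=7 on C; AccessAckData=1,
      // GrantData=5 on D), so opcode(0) is sufficient.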
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
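      // Worked example (illustrative): with beatBytes = 8 and a 32-byte PutFullData (lgSize = 5),
      // numBeats1 = UIntToOH1(5, maxLgSize) >> 3 = 0x1f >> 3 = 3, i.e. four beats. firstlastHelper
      // then sees counter = 0 (first), 3, 2, 1 (last) on successive fires, and the returned count
      // advances 0, 1, 2, 3.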
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
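    // Illustrative note: per needT() above, a Get or an Acquire with NtoB can be satisfied with
    // Branch (read) permissions, while Puts, atomics, a PREFETCH_WRITE hint, or an Acquire with
    // NtoT/BtoT requires Trunk (write) permissions at the target.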
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
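      // Illustrative note: like the transfer constructors above, the accessors below return a
      // (legal: Bool, bits) pair, where `legal` is the fast support check for the addressed manager.
      // A typical caller (hypothetical names) looks like:
      //   val (legal, get) = edge.Get(fromSource = 0.U, toAddress = addr, lgSize = 3.U)
      //   tl.a.valid := wantRead && legal
      //   tl.a.bits  := get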
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
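    // Illustrative note: this overload builds a GrantData beat (permissions plus data), whereas the
    // data-less Grant above leaves d.data as DontCare and is the usual response to an AcquirePerm or
    // to a permission upgrade that needs no data transfer.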
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
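    // Illustrative note: the TLEdgeIn constructors mirror the TLEdgeOut ones, but they build
    // manager-to-client B-channel requests and gate legality on client.supports* rather than
    // manager.supports*Fast.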
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
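The generated TLMonitor Verilog that follows is dominated by the bookkeeping idioms defined in Edges.scala above, in particular the firstlastHelper beat counter and per-source in-flight tracking. As a minimal, self-contained sketch of that counter pattern (illustrative only, with invented module and signal names; not one of the input files to elaborate):

import chisel3._
import chisel3.util._

// Stand-alone version of the firstlastHelper pattern from Edges.scala (illustrative sketch).
class BeatTracker(maxBeats: Int) extends Module {
  require(maxBeats >= 2, "a single-beat tracker degenerates to first = last = true")
  val io = IO(new Bundle {
    val fire   = Input(Bool())                   // a beat is accepted this cycle
    val beats1 = Input(UInt(log2Up(maxBeats).W)) // total beats in the burst, minus one
    val first  = Output(Bool())
    val last   = Output(Bool())
    val done   = Output(Bool())
  })
  val counter  = RegInit(0.U(log2Up(maxBeats).W))
  val counter1 = counter - 1.U
  io.first := counter === 0.U
  io.last  := counter === 1.U || io.beats1 === 0.U
  io.done  := io.last && io.fire
  when (io.fire) { counter := Mux(io.first, io.beats1, counter1) }
}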
module TLMonitor_63( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [8:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [20:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [8:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [8:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [20:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [8:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire 
_c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] 
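  // Illustrative note: this monitored port exposes only the A and D channels (see the module io list
  // above), so every C-channel bookkeeping signal here (_c_first_*, _c_set_*, _c_probe_ack_*,
  // _same_cycle_resp_*) is tied off to a constant and can be swept away by downstream logic
  // optimization.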
wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] c_first_beats1_decode = 3'h0; // @[Edges.scala:220:59] wire [2:0] c_first_beats1 = 3'h0; // @[Edges.scala:221:14] wire [2:0] _c_first_count_T = 3'h0; // @[Edges.scala:234:27] wire [2:0] c_first_count = 3'h0; // @[Edges.scala:234:25] wire [2:0] _c_first_counter_T = 3'h0; // @[Edges.scala:236:21] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // 
@[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] 
_same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_size = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_size = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_size = 3'h0; // @[Bundles.scala:265:61] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_5 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_11 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_15 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_17 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_21 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_23 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_29 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_31 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_35 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_37 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_53 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_55 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_59 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_61 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_65 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_67 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_71 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_73 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_79 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_81 = 1'h1; // @[Parameters.scala:57:20] wire _source_ok_T_85 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_87 = 1'h1; // @[Parameters.scala:57:20] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire [2:0] c_first_counter1 = 3'h7; // @[Edges.scala:230:28] wire [3:0] _c_first_counter1_T = 4'hF; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] 
_c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [20:0] _c_first_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_first_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_first_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_first_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_set_wo_ready_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_set_wo_ready_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_opcodes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_opcodes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_sizes_set_interm_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_sizes_set_interm_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_opcodes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_opcodes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_sizes_set_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_sizes_set_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_probe_ack_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_probe_ack_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _c_probe_ack_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _c_probe_ack_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _same_cycle_resp_WIRE_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _same_cycle_resp_WIRE_1_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _same_cycle_resp_WIRE_2_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _same_cycle_resp_WIRE_3_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [20:0] _same_cycle_resp_WIRE_4_bits_address = 21'h0; // @[Bundles.scala:265:74] wire [20:0] _same_cycle_resp_WIRE_5_bits_address = 21'h0; // @[Bundles.scala:265:61] wire [8:0] _c_first_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] 
wire [8:0] _c_first_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_first_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_first_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_set_wo_ready_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_set_wo_ready_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_opcodes_set_interm_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_opcodes_set_interm_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_sizes_set_interm_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_sizes_set_interm_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_opcodes_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_opcodes_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_sizes_set_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_sizes_set_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_probe_ack_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_probe_ack_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _c_probe_ack_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _c_probe_ack_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_1_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_2_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_3_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [8:0] _same_cycle_resp_WIRE_4_bits_source = 9'h0; // @[Bundles.scala:265:74] wire [8:0] _same_cycle_resp_WIRE_5_bits_source = 9'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 
= 16'h10; // @[Monitor.scala:724:51] wire [4098:0] _c_opcodes_set_T_1 = 4099'h0; // @[Monitor.scala:767:54] wire [4098:0] _c_sizes_set_T_1 = 4099'h0; // @[Monitor.scala:768:52] wire [11:0] _c_opcodes_set_T = 12'h0; // @[Monitor.scala:767:79] wire [11:0] _c_sizes_set_T = 12'h0; // @[Monitor.scala:768:77] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] _c_sizes_set_interm_T_1 = 4'h1; // @[Monitor.scala:766:59] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] c_sizes_set_interm = 4'h0; // @[Monitor.scala:755:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [3:0] _c_sizes_set_interm_T = 4'h0; // @[Monitor.scala:766:51] wire [511:0] _c_set_wo_ready_T = 512'h1; // @[OneHot.scala:58:35] wire [511:0] _c_set_T = 512'h1; // @[OneHot.scala:58:35] wire [1027:0] c_opcodes_set = 1028'h0; // @[Monitor.scala:740:34] wire [1027:0] c_sizes_set = 1028'h0; // @[Monitor.scala:741:34] wire [256:0] c_set = 257'h0; // @[Monitor.scala:738:34] wire [256:0] c_set_wo_ready = 257'h0; // @[Monitor.scala:739:34] wire [5:0] _c_first_beats1_decode_T_2 = 6'h0; // @[package.scala:243:46] wire [5:0] _c_first_beats1_decode_T_1 = 6'h3F; // @[package.scala:243:76] wire [12:0] _c_first_beats1_decode_T = 13'h3F; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [2:0] _mask_sizeOH_T = io_in_a_bits_size_0; // @[Misc.scala:202:34] wire [8:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] 
_uncommonBits_T_4 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_9 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_10 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_11 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_12 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_13 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_14 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_15 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_16 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_17 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_18 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_19 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_20 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_21 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_22 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_23 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_24 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_25 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_26 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_27 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_28 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_29 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_30 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_31 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_32 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_33 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_34 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_35 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_36 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_37 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_38 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_39 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_40 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_41 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_42 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_43 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_44 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_45 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_46 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_47 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_48 = 
io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_49 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_50 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_51 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_52 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_53 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_54 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_55 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_56 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_57 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_58 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_59 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_60 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_61 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_62 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_63 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_64 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _uncommonBits_T_65 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_6 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_7 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_8 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_9 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_10 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [8:0] _source_ok_uncommonBits_T_11 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire _source_ok_T = io_in_a_bits_source_0 == 9'h90; // @[Monitor.scala:36:7] wire _source_ok_WIRE_0 = _source_ok_T; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits = _source_ok_uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [6:0] _source_ok_T_1 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_7 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_13 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_19 = io_in_a_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire _source_ok_T_2 = _source_ok_T_1 == 7'h20; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_4 = _source_ok_T_2; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_6 = _source_ok_T_4; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1 = _source_ok_T_6; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_8 = _source_ok_T_7 == 7'h21; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_10 = _source_ok_T_8; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_12 = _source_ok_T_10; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_2 = _source_ok_T_12; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_2 = _source_ok_uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_14 = _source_ok_T_13 == 7'h22; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_16 = _source_ok_T_14; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_18 = _source_ok_T_16; // 
@[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_3 = _source_ok_T_18; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_3 = _source_ok_uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_20 = _source_ok_T_19 == 7'h23; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_22 = _source_ok_T_20; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_24 = _source_ok_T_22; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_4 = _source_ok_T_24; // @[Parameters.scala:1138:31] wire _source_ok_T_25 = io_in_a_bits_source_0 == 9'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_5 = _source_ok_T_25; // @[Parameters.scala:1138:31] wire _source_ok_T_26 = io_in_a_bits_source_0 == 9'h41; // @[Monitor.scala:36:7] wire _source_ok_WIRE_6 = _source_ok_T_26; // @[Parameters.scala:1138:31] wire [4:0] source_ok_uncommonBits_4 = _source_ok_uncommonBits_T_4[4:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_27 = io_in_a_bits_source_0[8:5]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_33 = io_in_a_bits_source_0[8:5]; // @[Monitor.scala:36:7] wire _source_ok_T_28 = _source_ok_T_27 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_30 = _source_ok_T_28; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_32 = _source_ok_T_30; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_7 = _source_ok_T_32; // @[Parameters.scala:1138:31] wire [4:0] source_ok_uncommonBits_5 = _source_ok_uncommonBits_T_5[4:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_34 = _source_ok_T_33 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_36 = _source_ok_T_34; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_38 = _source_ok_T_36; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_8 = _source_ok_T_38; // @[Parameters.scala:1138:31] wire _source_ok_T_39 = io_in_a_bits_source_0 == 9'h42; // @[Monitor.scala:36:7] wire _source_ok_WIRE_9 = _source_ok_T_39; // @[Parameters.scala:1138:31] wire _source_ok_T_40 = io_in_a_bits_source_0 == 9'h100; // @[Monitor.scala:36:7] wire _source_ok_WIRE_10 = _source_ok_T_40; // @[Parameters.scala:1138:31] wire _source_ok_T_41 = _source_ok_WIRE_0 | _source_ok_WIRE_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_42 = _source_ok_T_41 | _source_ok_WIRE_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_43 = _source_ok_T_42 | _source_ok_WIRE_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_44 = _source_ok_T_43 | _source_ok_WIRE_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_45 = _source_ok_T_44 | _source_ok_WIRE_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_46 = _source_ok_T_45 | _source_ok_WIRE_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_47 = _source_ok_T_46 | _source_ok_WIRE_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_48 = _source_ok_T_47 | _source_ok_WIRE_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_49 = _source_ok_T_48 | _source_ok_WIRE_9; // @[Parameters.scala:1138:31, :1139:46] wire source_ok = _source_ok_T_49 | _source_ok_WIRE_10; // @[Parameters.scala:1138:31, :1139:46] wire [12:0] _GEN = 13'h3F << io_in_a_bits_size_0; // @[package.scala:243:71] wire [12:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [12:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign 
_a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [20:0] _is_aligned_T = {15'h0, io_in_a_bits_address_0[5:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 21'h0; // @[Edges.scala:21:{16,24}] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = io_in_a_bits_size_0 > 3'h2; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | 
_mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [1:0] uncommonBits = _uncommonBits_T[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_1 = _uncommonBits_T_1[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_2 = _uncommonBits_T_2[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_3 = _uncommonBits_T_3[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_4 = _uncommonBits_T_4[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_5 = _uncommonBits_T_5[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_6 = _uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_7 = _uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_8 = _uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_9 = _uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_10 = _uncommonBits_T_10[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_11 = _uncommonBits_T_11[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_12 = _uncommonBits_T_12[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_13 = _uncommonBits_T_13[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_14 = _uncommonBits_T_14[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_15 = _uncommonBits_T_15[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_16 = _uncommonBits_T_16[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_17 = _uncommonBits_T_17[4:0]; // 
@[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_18 = _uncommonBits_T_18[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_19 = _uncommonBits_T_19[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_20 = _uncommonBits_T_20[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_21 = _uncommonBits_T_21[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_22 = _uncommonBits_T_22[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_23 = _uncommonBits_T_23[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_24 = _uncommonBits_T_24[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_25 = _uncommonBits_T_25[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_26 = _uncommonBits_T_26[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_27 = _uncommonBits_T_27[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_28 = _uncommonBits_T_28[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_29 = _uncommonBits_T_29[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_30 = _uncommonBits_T_30[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_31 = _uncommonBits_T_31[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_32 = _uncommonBits_T_32[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_33 = _uncommonBits_T_33[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_34 = _uncommonBits_T_34[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_35 = _uncommonBits_T_35[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_36 = _uncommonBits_T_36[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_37 = _uncommonBits_T_37[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_38 = _uncommonBits_T_38[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_39 = _uncommonBits_T_39[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_40 = _uncommonBits_T_40[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_41 = _uncommonBits_T_41[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_42 = _uncommonBits_T_42[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_43 = _uncommonBits_T_43[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_44 = _uncommonBits_T_44[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_45 = _uncommonBits_T_45[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_46 = _uncommonBits_T_46[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_47 = _uncommonBits_T_47[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_48 = _uncommonBits_T_48[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_49 = _uncommonBits_T_49[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_50 = _uncommonBits_T_50[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_51 = _uncommonBits_T_51[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_52 = _uncommonBits_T_52[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_53 = _uncommonBits_T_53[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_54 = _uncommonBits_T_54[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_55 = _uncommonBits_T_55[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_56 = _uncommonBits_T_56[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_57 = _uncommonBits_T_57[1:0]; // @[Parameters.scala:52:{29,56}] 
wire [4:0] uncommonBits_58 = _uncommonBits_T_58[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_59 = _uncommonBits_T_59[4:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_60 = _uncommonBits_T_60[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_61 = _uncommonBits_T_61[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_62 = _uncommonBits_T_62[1:0]; // @[Parameters.scala:52:{29,56}] wire [1:0] uncommonBits_63 = _uncommonBits_T_63[1:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_64 = _uncommonBits_T_64[4:0]; // @[Parameters.scala:52:{29,56}] wire [4:0] uncommonBits_65 = _uncommonBits_T_65[4:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_50 = io_in_d_bits_source_0 == 9'h90; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_0 = _source_ok_T_50; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_6 = _source_ok_uncommonBits_T_6[1:0]; // @[Parameters.scala:52:{29,56}] wire [6:0] _source_ok_T_51 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_57 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_63 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire [6:0] _source_ok_T_69 = io_in_d_bits_source_0[8:2]; // @[Monitor.scala:36:7] wire _source_ok_T_52 = _source_ok_T_51 == 7'h20; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_54 = _source_ok_T_52; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_56 = _source_ok_T_54; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_1 = _source_ok_T_56; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_7 = _source_ok_uncommonBits_T_7[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_58 = _source_ok_T_57 == 7'h21; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_60 = _source_ok_T_58; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_62 = _source_ok_T_60; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_2 = _source_ok_T_62; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_8 = _source_ok_uncommonBits_T_8[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_64 = _source_ok_T_63 == 7'h22; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_66 = _source_ok_T_64; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_68 = _source_ok_T_66; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_3 = _source_ok_T_68; // @[Parameters.scala:1138:31] wire [1:0] source_ok_uncommonBits_9 = _source_ok_uncommonBits_T_9[1:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_70 = _source_ok_T_69 == 7'h23; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_72 = _source_ok_T_70; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_74 = _source_ok_T_72; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_4 = _source_ok_T_74; // @[Parameters.scala:1138:31] wire _source_ok_T_75 = io_in_d_bits_source_0 == 9'h40; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_5 = _source_ok_T_75; // @[Parameters.scala:1138:31] wire _source_ok_T_76 = io_in_d_bits_source_0 == 9'h41; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_6 = _source_ok_T_76; // @[Parameters.scala:1138:31] wire [4:0] source_ok_uncommonBits_10 = _source_ok_uncommonBits_T_10[4:0]; // @[Parameters.scala:52:{29,56}] wire [3:0] _source_ok_T_77 = io_in_d_bits_source_0[8:5]; // @[Monitor.scala:36:7] wire [3:0] _source_ok_T_83 = io_in_d_bits_source_0[8:5]; // @[Monitor.scala:36:7] wire _source_ok_T_78 = _source_ok_T_77 == 4'h1; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_80 = _source_ok_T_78; // 
@[Parameters.scala:54:{32,67}] wire _source_ok_T_82 = _source_ok_T_80; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_7 = _source_ok_T_82; // @[Parameters.scala:1138:31] wire [4:0] source_ok_uncommonBits_11 = _source_ok_uncommonBits_T_11[4:0]; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_84 = _source_ok_T_83 == 4'h0; // @[Parameters.scala:54:{10,32}] wire _source_ok_T_86 = _source_ok_T_84; // @[Parameters.scala:54:{32,67}] wire _source_ok_T_88 = _source_ok_T_86; // @[Parameters.scala:54:67, :56:48] wire _source_ok_WIRE_1_8 = _source_ok_T_88; // @[Parameters.scala:1138:31] wire _source_ok_T_89 = io_in_d_bits_source_0 == 9'h42; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_9 = _source_ok_T_89; // @[Parameters.scala:1138:31] wire _source_ok_T_90 = io_in_d_bits_source_0 == 9'h100; // @[Monitor.scala:36:7] wire _source_ok_WIRE_1_10 = _source_ok_T_90; // @[Parameters.scala:1138:31] wire _source_ok_T_91 = _source_ok_WIRE_1_0 | _source_ok_WIRE_1_1; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_92 = _source_ok_T_91 | _source_ok_WIRE_1_2; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_93 = _source_ok_T_92 | _source_ok_WIRE_1_3; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_94 = _source_ok_T_93 | _source_ok_WIRE_1_4; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_95 = _source_ok_T_94 | _source_ok_WIRE_1_5; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_96 = _source_ok_T_95 | _source_ok_WIRE_1_6; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_97 = _source_ok_T_96 | _source_ok_WIRE_1_7; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_98 = _source_ok_T_97 | _source_ok_WIRE_1_8; // @[Parameters.scala:1138:31, :1139:46] wire _source_ok_T_99 = _source_ok_T_98 | _source_ok_WIRE_1_9; // @[Parameters.scala:1138:31, :1139:46] wire source_ok_1 = _source_ok_T_99 | _source_ok_WIRE_1_10; // @[Parameters.scala:1138:31, :1139:46] wire _T_1266 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_1266; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_1266; // @[Decoupled.scala:51:35] wire [5:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode = _a_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1 = a_first_beats1_opdata ? 
a_first_beats1_decode : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T = {1'h0, a_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1 = _a_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire a_first = a_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T = a_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_1 = a_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last = _a_first_last_T | _a_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire a_first_done = a_first_last & _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count = a_first_beats1 & _a_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T = a_first ? a_first_beats1 : a_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [8:0] source; // @[Monitor.scala:390:22] reg [20:0] address; // @[Monitor.scala:391:22] wire _T_1334 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_1334; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_1334; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_1334; // @[Decoupled.scala:51:35] wire [12:0] _GEN_0 = 13'h3F << io_in_d_bits_size_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [12:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode = _d_first_beats1_decode_T_2[5:3]; // @[package.scala:243:46] wire d_first_beats1_opdata = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire [2:0] d_first_beats1 = d_first_beats1_opdata ? 
d_first_beats1_decode : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T = {1'h0, d_first_counter} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1 = _d_first_counter1_T[2:0]; // @[Edges.scala:230:28] wire d_first = d_first_counter == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T = d_first_counter == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_1 = d_first_beats1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last = _d_first_last_T | _d_first_last_T_1; // @[Edges.scala:232:{25,33,43}] wire d_first_done = d_first_last & _d_first_T; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count = d_first_beats1 & _d_first_count_T; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T = d_first ? d_first_beats1 : d_first_counter1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [8:0] source_1; // @[Monitor.scala:541:22] reg [256:0] inflight; // @[Monitor.scala:614:27] reg [1027:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [1027:0] inflight_sizes; // @[Monitor.scala:618:33] wire [5:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] a_first_beats1_decode_1 = _a_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] wire [2:0] a_first_beats1_1 = a_first_beats1_opdata_1 ? a_first_beats1_decode_1 : 3'h0; // @[Edges.scala:92:28, :220:59, :221:14] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] a_first_counter1_1 = _a_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _a_first_last_T_2 = a_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _a_first_last_T_3 = a_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire a_first_last_1 = _a_first_last_T_2 | _a_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire a_first_done_1 = a_first_last_1 & _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] a_first_count_1 = a_first_beats1_1 & _a_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _a_first_counter_T_1 = a_first_1 ? a_first_beats1_1 : a_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [5:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_1 = _d_first_beats1_decode_T_5[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_1 = d_first_beats1_opdata_1 ? 
d_first_beats1_decode_1 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_1 = _d_first_counter1_T_1[2:0]; // @[Edges.scala:230:28] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_2 = d_first_counter_1 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_3 = d_first_beats1_1 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_1 = _d_first_last_T_2 | _d_first_last_T_3; // @[Edges.scala:232:{25,33,43}] wire d_first_done_1 = d_first_last_1 & _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_1 = d_first_beats1_1 & _d_first_count_T_1; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_1 = d_first_1 ? d_first_beats1_1 : d_first_counter1_1; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [256:0] a_set; // @[Monitor.scala:626:34] wire [256:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [1027:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [1027:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [11:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [11:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [11:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [11:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire [11:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [11:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [11:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [11:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [11:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [1027:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [1027:0] _a_opcode_lookup_T_6 = {1024'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [1027:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[1027:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [1027:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [1027:0] _a_size_lookup_T_6 = {1024'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [1027:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[1027:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [3:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, 
:651:26, :684:44] wire [511:0] _GEN_2 = 512'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [511:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [511:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[256:0] : 257'h0; // @[OneHot.scala:58:35] wire _T_1199 = _T_1266 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_1199 ? _a_set_T[256:0] : 257'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_1199 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [3:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [3:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_1199 ? _a_sizes_set_interm_T_1 : 4'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [11:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [11:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [11:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [4098:0] _a_opcodes_set_T_1 = {4095'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_1199 ? _a_opcodes_set_T_1[1027:0] : 1028'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [4098:0] _a_sizes_set_T_1 = {4095'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_1199 ? _a_sizes_set_T_1[1027:0] : 1028'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [256:0] d_clr; // @[Monitor.scala:664:34] wire [256:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [1027:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [1027:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_1245 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [511:0] _GEN_5 = 512'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [511:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [511:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [511:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [511:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_1245 & ~d_release_ack ? _d_clr_wo_ready_T[256:0] : 257'h0; // @[OneHot.scala:58:35] wire _T_1214 = _T_1334 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_1214 ? 
_d_clr_T[256:0] : 257'h0; // @[OneHot.scala:58:35] wire [4110:0] _d_opcodes_clr_T_5 = 4111'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_1214 ? _d_opcodes_clr_T_5[1027:0] : 1028'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [4110:0] _d_sizes_clr_T_5 = 4111'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_1214 ? _d_sizes_clr_T_5[1027:0] : 1028'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [256:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [256:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [256:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [1027:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [1027:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [1027:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [1027:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [1027:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [1027:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [256:0] inflight_1; // @[Monitor.scala:726:35] wire [256:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [1027:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [1027:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [1027:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [1027:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire [5:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[5:0]; // @[package.scala:243:{71,76}] wire [5:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] wire [2:0] d_first_beats1_decode_2 = _d_first_beats1_decode_T_8[5:3]; // @[package.scala:243:46] wire [2:0] d_first_beats1_2 = d_first_beats1_opdata_2 ? 
d_first_beats1_decode_2 : 3'h0; // @[Edges.scala:106:36, :220:59, :221:14] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire [3:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 4'h1; // @[Edges.scala:229:27, :230:28] wire [2:0] d_first_counter1_2 = _d_first_counter1_T_2[2:0]; // @[Edges.scala:230:28] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _d_first_last_T_4 = d_first_counter_2 == 3'h1; // @[Edges.scala:229:27, :232:25] wire _d_first_last_T_5 = d_first_beats1_2 == 3'h0; // @[Edges.scala:221:14, :232:43] wire d_first_last_2 = _d_first_last_T_4 | _d_first_last_T_5; // @[Edges.scala:232:{25,33,43}] wire d_first_done_2 = d_first_last_2 & _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire [2:0] d_first_count_2 = d_first_beats1_2 & _d_first_count_T_2; // @[Edges.scala:221:14, :234:{25,27}] wire [2:0] _d_first_counter_T_2 = d_first_2 ? d_first_beats1_2 : d_first_counter1_2; // @[Edges.scala:221:14, :230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [1027:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [1027:0] _c_opcode_lookup_T_6 = {1024'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [1027:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[1027:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [1027:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [1027:0] _c_size_lookup_T_6 = {1024'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [1027:0] _c_size_lookup_T_7 = {1'h0, _c_size_lookup_T_6[1027:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [256:0] d_clr_1; // @[Monitor.scala:774:34] wire [256:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [1027:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [1027:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_1310 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_1310 & d_release_ack_1 ? _d_clr_wo_ready_T_1[256:0] : 257'h0; // @[OneHot.scala:58:35] wire _T_1292 = _T_1334 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_1292 ? _d_clr_T_1[256:0] : 257'h0; // @[OneHot.scala:58:35] wire [4110:0] _d_opcodes_clr_T_11 = 4111'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_1292 ? _d_opcodes_clr_T_11[1027:0] : 1028'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [4110:0] _d_sizes_clr_T_11 = 4111'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_1292 ? 
_d_sizes_clr_T_11[1027:0] : 1028'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 9'h0; // @[Monitor.scala:36:7, :795:113] wire [256:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [256:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [1027:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [1027:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [1027:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [1027:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
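The long run of a_first_* / d_first_* wires above is the TileLink multi-beat bookkeeping from Edges.scala: a per-channel down-counter is loaded with beats1 (beats per burst minus one) on the first accepted beat and decremented on each later one, and the first/last flags fall out of it. A minimal plain-Scala sketch of that pattern, using hypothetical class and method names that are not part of the generated code or the rocket-chip API:

// Software model of the a_first_counter / a_first / a_first_last wires above.
final class BeatTracker {
  private var counter = 0                // mirrors a_first_counter
  def first: Boolean = counter == 0      // mirrors a_first
  def last(beats1: Int): Boolean =       // mirrors a_first_last
    counter == 1 || beats1 == 0
  // Call once per accepted beat (ready && valid); beats1 is decoded from
  // size/opcode and is 0 for data-less messages.
  def step(beats1: Int): Unit =
    counter = if (first) beats1 else counter - 1
}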
Generate the Verilog code corresponding to the following Chisel files. File ListBuffer.scala: /* * Copyright 2019 SiFive, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You should have received a copy of LICENSE.Apache2 along with * this software. If not, you may obtain a copy at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sifive.blocks.inclusivecache import chisel3._ import chisel3.util._ import freechips.rocketchip.util._ case class ListBufferParameters[T <: Data](gen: T, queues: Int, entries: Int, bypass: Boolean) { val queueBits = log2Up(queues) val entryBits = log2Up(entries) } class ListBufferPush[T <: Data](params: ListBufferParameters[T]) extends Bundle { val index = UInt(params.queueBits.W) val data = Output(params.gen) } class ListBuffer[T <: Data](params: ListBufferParameters[T]) extends Module { override def desiredName = s"ListBuffer_${params.gen.typeName}_q${params.queues}_e${params.entries}" val io = IO(new Bundle { // push is visible on the same cycle; flow queues val push = Flipped(Decoupled(new ListBufferPush(params))) val valid = UInt(params.queues.W) val pop = Flipped(Valid(UInt(params.queueBits.W))) val data = Output(params.gen) }) val valid = RegInit(0.U(params.queues.W)) val head = Mem(params.queues, UInt(params.entryBits.W)) val tail = Mem(params.queues, UInt(params.entryBits.W)) val used = RegInit(0.U(params.entries.W)) val next = Mem(params.entries, UInt(params.entryBits.W)) val data = Mem(params.entries, params.gen) val freeOH = ~(leftOR(~used) << 1) & ~used val freeIdx = OHToUInt(freeOH) val valid_set = WireDefault(0.U(params.queues.W)) val valid_clr = WireDefault(0.U(params.queues.W)) val used_set = WireDefault(0.U(params.entries.W)) val used_clr = WireDefault(0.U(params.entries.W)) val push_tail = tail.read(io.push.bits.index) val push_valid = valid(io.push.bits.index) io.push.ready := !used.andR when (io.push.fire) { valid_set := UIntToOH(io.push.bits.index, params.queues) used_set := freeOH data.write(freeIdx, io.push.bits.data) when (push_valid) { next.write(push_tail, freeIdx) } .otherwise { head.write(io.push.bits.index, freeIdx) } tail.write(io.push.bits.index, freeIdx) } val pop_head = head.read(io.pop.bits) val pop_valid = valid(io.pop.bits) // Bypass push data to the peek port io.data := (if (!params.bypass) data.read(pop_head) else Mux(!pop_valid, io.push.bits.data, data.read(pop_head))) io.valid := (if (!params.bypass) valid else (valid | valid_set)) // It is an error to pop something that is not valid assert (!io.pop.fire || (io.valid)(io.pop.bits)) when (io.pop.fire) { used_clr := UIntToOH(pop_head, params.entries) when (pop_head === tail.read(io.pop.bits)) { valid_clr := UIntToOH(io.pop.bits, params.queues) } head.write(io.pop.bits, Mux(io.push.fire && push_valid && push_tail === pop_head, freeIdx, next.read(pop_head))) } // Empty bypass changes no state when ((!params.bypass).B || !io.pop.valid || pop_valid) { used := (used & ~used_clr) | used_set valid := (valid & ~valid_clr) | valid_set } }
module data_40x289( // @[ListBuffer.scala:52:18]
  input [5:0] R0_addr,
  input R0_en,
  input R0_clk,
  output [288:0] R0_data,
  input [5:0] W0_addr,
  input W0_en,
  input W0_clk,
  input [288:0] W0_data
);

  reg [288:0] Memory[0:39]; // @[ListBuffer.scala:52:18]
  always @(posedge W0_clk) begin // @[ListBuffer.scala:52:18]
    if (W0_en & 1'h1) // @[ListBuffer.scala:52:18]
      Memory[W0_addr] <= W0_data; // @[ListBuffer.scala:52:18]
  end // @[ListBuffer.scala:52:18]
  assign R0_data = R0_en ? Memory[R0_addr] : 289'bx; // @[ListBuffer.scala:52:18]
endmodule
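ListBuffer.scala above allocates the lowest-numbered free entry with ~(leftOR(~used) << 1) & ~used. A minimal sketch of the same bit trick on plain Scala Ints, with hypothetical helper names (the real leftOR lives in freechips.rocketchip.util and operates on UInt):

// Sketch: isolate the lowest clear bit of `used` as a one-hot value, the way
// ListBuffer selects its next free entry. Plain Scala, widths up to 31 bits.
object FreeOHSketch extends App {
  // leftOR: propagate every set bit of x toward the MSB, then mask to `width` bits.
  def leftOR(x: Int, width: Int): Int = {
    var acc = x; var s = 1
    while (s < width) { acc |= acc << s; s <<= 1 }
    acc & ((1 << width) - 1)
  }
  def freeOH(used: Int, width: Int): Int = {
    val free = ~used & ((1 << width) - 1)  // 1 = entry is unused
    ~(leftOR(free, width) << 1) & free     // keep only the lowest free bit
  }
  assert(freeOH(0x07, 8) == 0x08)          // entries 0..2 used -> allocate entry 3
  println(freeOH(0x07, 8).toBinaryString)  // prints 1000
}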
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok,
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel address changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed version of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal :=
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
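// Editor-added note (comments only; not part of the upstream Monitor.scala): the inflight,
// inflight_opcodes and inflight_sizes registers are all updated with the same OR-in-set /
// AND-out-clear pattern. a_set and the shifted a_opcodes_set / a_sizes_set mark the slot of
// an accepted 'A' request, while d_clr / d_opcodes_clr / d_sizes_clr blank that slot when the
// matching 'D' response is accepted, so each source ID is tracked as busy from request until
// response.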
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else {
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // A potentially empty inclusive range of 2-powers [min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max,
s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask are the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f decribes a device managing 0x1000-0x100f, 0x1100-0x110f, ... 
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there eixst C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there eixst D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD => 
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
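For reference, a minimal client sketch using the TLEdgeOut constructors above. The node setup, the fixed address, and the single-shot `sent` flag are illustrative assumptions and are not taken from the files; only the `edge.Get` call and the (legal, bits) return convention come from Edges.scala.

import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class GetOnceClient(implicit p: Parameters) extends LazyModule {
  // One-source client port; real designs size sourceId to the number of outstanding requests.
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1(name = "getOnce")))))
  lazy val module = new LazyModuleImp(this) {
    val (out, edge) = node.out(0)
    // edge.Get returns (legal, bits); legal reflects manager.supportsGetFast for this address/size.
    val (legal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x1000.U, lgSize = 3.U)
    val sent = RegInit(false.B)
    when (out.a.fire) { sent := true.B }
    out.a.valid := legal && !sent      // issue a single 8-byte Get
    out.a.bits  := getBits
    out.d.ready := true.B              // sink the AccessAckData beats
    // tie off the unused B/C/E channels of the bundle
    out.b.ready := true.B
    out.c.valid := false.B; out.c.bits := DontCare
    out.e.valid := false.B; out.e.bits := DontCare
  }
}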
module TLMonitor_59( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_c_ready, // @[Monitor.scala:20:14] input io_in_c_valid, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_c_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_c_bits_source, // @[Monitor.scala:20:14] input [31:0] io_in_c_bits_address, // @[Monitor.scala:20:14] input io_in_c_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_param, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [3:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_sink, // @[Monitor.scala:20:14] input io_in_d_bits_denied, // @[Monitor.scala:20:14] input io_in_d_bits_corrupt, // @[Monitor.scala:20:14] input io_in_e_valid, // @[Monitor.scala:20:14] input [2:0] io_in_e_bits_sink // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire [12:0] _GEN = {10'h0, io_in_a_bits_size}; // @[package.scala:243:71] wire [12:0] _GEN_0 = {10'h0, io_in_c_bits_size}; // @[package.scala:243:71] wire _a_first_T_1 = io_in_a_ready & io_in_a_valid; // @[Decoupled.scala:51:35] reg [2:0] a_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [2:0] size; // @[Monitor.scala:389:22] reg [3:0] source; // @[Monitor.scala:390:22] reg [31:0] address; // @[Monitor.scala:391:22] wire _d_first_T_3 = io_in_d_ready & io_in_d_valid; // @[Decoupled.scala:51:35] reg [2:0] d_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] param_1; // @[Monitor.scala:539:22] reg [2:0] size_1; // @[Monitor.scala:540:22] reg [3:0] source_1; // @[Monitor.scala:541:22] reg [2:0] sink; // @[Monitor.scala:542:22] reg denied; // @[Monitor.scala:543:22] wire _c_first_T_1 = io_in_c_ready & io_in_c_valid; // @[Decoupled.scala:51:35] reg [2:0] c_first_counter; // @[Edges.scala:229:27] reg [2:0] opcode_3; // @[Monitor.scala:515:22] reg [2:0] param_3; // @[Monitor.scala:516:22] reg [2:0] size_3; // @[Monitor.scala:517:22] reg [3:0] source_3; // @[Monitor.scala:518:22] reg [31:0] address_2; // @[Monitor.scala:519:22] reg [9:0] inflight; // @[Monitor.scala:614:27] reg [39:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [39:0] inflight_sizes; // @[Monitor.scala:618:33] reg [2:0] a_first_counter_1; // @[Edges.scala:229:27] wire a_first_1 = a_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_1; // @[Edges.scala:229:27] wire d_first_1 = d_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] wire [15:0] _GEN_1 = {12'h0, io_in_a_bits_source}; // @[OneHot.scala:58:35] wire 
_GEN_2 = _a_first_T_1 & a_first_1; // @[Decoupled.scala:51:35] wire d_release_ack = io_in_d_bits_opcode == 3'h6; // @[Monitor.scala:673:46] wire _GEN_3 = io_in_d_bits_opcode != 3'h6; // @[Monitor.scala:673:46, :674:74] wire [15:0] _GEN_4 = {12'h0, io_in_d_bits_source}; // @[OneHot.scala:58:35] reg [31:0] watchdog; // @[Monitor.scala:709:27] reg [9:0] inflight_1; // @[Monitor.scala:726:35] reg [39:0] inflight_sizes_1; // @[Monitor.scala:728:35] reg [2:0] c_first_counter_1; // @[Edges.scala:229:27] wire c_first_1 = c_first_counter_1 == 3'h0; // @[Edges.scala:229:27, :231:25] reg [2:0] d_first_counter_2; // @[Edges.scala:229:27] wire d_first_2 = d_first_counter_2 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_5 = io_in_c_bits_opcode[2] & io_in_c_bits_opcode[1]; // @[Edges.scala:68:{36,40,51}] wire [15:0] _GEN_6 = {12'h0, io_in_c_bits_source}; // @[OneHot.scala:58:35] wire _GEN_7 = _c_first_T_1 & c_first_1 & _GEN_5; // @[Decoupled.scala:51:35] reg [31:0] watchdog_1; // @[Monitor.scala:818:27] reg [7:0] inflight_2; // @[Monitor.scala:828:27] reg [2:0] d_first_counter_3; // @[Edges.scala:229:27] wire d_first_3 = d_first_counter_3 == 3'h0; // @[Edges.scala:229:27, :231:25] wire _GEN_8 = _d_first_T_3 & d_first_3 & io_in_d_bits_opcode[2] & ~(io_in_d_bits_opcode[1]); // @[Decoupled.scala:51:35] wire [7:0] _GEN_9 = {5'h0, io_in_d_bits_sink}; // @[OneHot.scala:58:35] wire [7:0] d_set = _GEN_8 ? 8'h1 << _GEN_9 : 8'h0; // @[OneHot.scala:58:35] wire [7:0] _GEN_10 = {5'h0, io_in_e_bits_sink}; // @[OneHot.scala:58:35]
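The port and register widths in the monitor above follow directly from the edge parameters consumed by firstlastHelper in Edges.scala. A small Scala sketch of that arithmetic, under the assumption (inferred from the 8-bit mask and the 3-bit beat counters, not stated in the source) that beatBytes = 8 and maxTransfer = 64:

import chisel3.util.{log2Ceil, log2Up}

val beatBytes   = 8                                 // io_in_a_bits_mask is [7:0]
val maxTransfer = 64                                // assumed largest transfer in bytes
val maxLgSize   = log2Ceil(maxTransfer)             // 6
val sizeBits    = log2Up(maxLgSize + 1)             // 3 -> io_in_a_bits_size[2:0]
val counterBits = log2Up(maxTransfer / beatBytes)   // 3 -> a_first_counter / d_first_counter are [2:0]
// numBeats1 for a full 64-byte burst is UIntToOH1(size, maxLgSize) >> log2Ceil(beatBytes),
// i.e. 0x3f >> 3 = 7 (eight beats), which the 3-bit counters can track.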
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async }
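A minimal usage sketch of the helpers defined above (module and signal names are illustrative): ToAsyncBundle builds an AsyncQueueSource in the producing clock domain and FromAsyncBundle builds the matching AsyncQueueSink in the consuming domain, while AsyncQueueParams.singleton() selects the depth-1, safe configuration used by the generated module that follows.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util._

class UInt8Crossing extends RawModule {
  val clockA = IO(Input(Clock())); val resetA = IO(Input(Bool()))
  val clockB = IO(Input(Clock())); val resetB = IO(Input(Bool()))
  val enq = IO(Flipped(Decoupled(UInt(8.W))))   // lives in domain A
  val deq = IO(Decoupled(UInt(8.W)))            // lives in domain B

  val params = AsyncQueueParams.singleton()     // depth 1, sync 3, safe = true
  // Source side: capture enq into the async bundle under clockA/resetA.
  val async = withClockAndReset(clockA, resetA) { ToAsyncBundle(enq, params) }
  // Sink side: recover a Decoupled interface under clockB/resetB.
  deq <> withClockAndReset(clockB, resetB) { FromAsyncBundle(async) }
}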
module AsyncQueueSource_DebugInternalBundle( // @[AsyncQueue.scala:70:7] input clock, // @[AsyncQueue.scala:70:7] input reset, // @[AsyncQueue.scala:70:7] output io_enq_ready, // @[AsyncQueue.scala:73:14] input io_enq_valid, // @[AsyncQueue.scala:73:14] input io_enq_bits_resumereq, // @[AsyncQueue.scala:73:14] input [9:0] io_enq_bits_hartsel, // @[AsyncQueue.scala:73:14] input io_enq_bits_ackhavereset, // @[AsyncQueue.scala:73:14] input io_enq_bits_hrmask_0, // @[AsyncQueue.scala:73:14] output io_async_mem_0_resumereq, // @[AsyncQueue.scala:73:14] output [9:0] io_async_mem_0_hartsel, // @[AsyncQueue.scala:73:14] output io_async_mem_0_ackhavereset, // @[AsyncQueue.scala:73:14] output io_async_mem_0_hrmask_0, // @[AsyncQueue.scala:73:14] input io_async_ridx, // @[AsyncQueue.scala:73:14] output io_async_widx, // @[AsyncQueue.scala:73:14] input io_async_safe_ridx_valid, // @[AsyncQueue.scala:73:14] output io_async_safe_widx_valid, // @[AsyncQueue.scala:73:14] output io_async_safe_source_reset_n, // @[AsyncQueue.scala:73:14] input io_async_safe_sink_reset_n // @[AsyncQueue.scala:73:14] ); wire _sink_extend_io_out; // @[AsyncQueue.scala:105:30] wire _source_valid_0_io_out; // @[AsyncQueue.scala:102:32] wire io_enq_valid_0 = io_enq_valid; // @[AsyncQueue.scala:70:7] wire io_enq_bits_resumereq_0 = io_enq_bits_resumereq; // @[AsyncQueue.scala:70:7] wire [9:0] io_enq_bits_hartsel_0 = io_enq_bits_hartsel; // @[AsyncQueue.scala:70:7] wire io_enq_bits_ackhavereset_0 = io_enq_bits_ackhavereset; // @[AsyncQueue.scala:70:7] wire io_enq_bits_hrmask_0_0 = io_enq_bits_hrmask_0; // @[AsyncQueue.scala:70:7] wire io_async_ridx_0 = io_async_ridx; // @[AsyncQueue.scala:70:7] wire io_async_safe_ridx_valid_0 = io_async_safe_ridx_valid; // @[AsyncQueue.scala:70:7] wire io_async_safe_sink_reset_n_0 = io_async_safe_sink_reset_n; // @[AsyncQueue.scala:70:7] wire _widx_T = reset; // @[AsyncQueue.scala:83:30] wire _ready_reg_T = reset; // @[AsyncQueue.scala:90:35] wire _widx_reg_T = reset; // @[AsyncQueue.scala:93:34] wire _source_valid_0_reset_T = reset; // @[AsyncQueue.scala:107:36] wire _source_valid_1_reset_T = reset; // @[AsyncQueue.scala:108:36] wire _sink_extend_reset_T = reset; // @[AsyncQueue.scala:109:36] wire _sink_valid_reset_T = reset; // @[AsyncQueue.scala:110:35] wire _io_async_safe_source_reset_n_T = reset; // @[AsyncQueue.scala:123:34] wire io_enq_bits_hasel = 1'h0; // @[AsyncQueue.scala:70:7] wire io_enq_bits_hamask_0 = 1'h0; // @[AsyncQueue.scala:70:7] wire io_async_mem_0_hasel = 1'h0; // @[AsyncQueue.scala:70:7] wire io_async_mem_0_hamask_0 = 1'h0; // @[AsyncQueue.scala:70:7] wire _widx_T_3 = 1'h0; // @[AsyncQueue.scala:54:32] wire _io_enq_ready_T; // @[AsyncQueue.scala:91:29] wire _io_async_safe_source_reset_n_T_1; // @[AsyncQueue.scala:123:27] wire io_enq_ready_0; // @[AsyncQueue.scala:70:7] wire io_async_mem_0_hrmask_0_0; // @[AsyncQueue.scala:70:7] wire io_async_mem_0_resumereq_0; // @[AsyncQueue.scala:70:7] wire [9:0] io_async_mem_0_hartsel_0; // @[AsyncQueue.scala:70:7] wire io_async_mem_0_ackhavereset_0; // @[AsyncQueue.scala:70:7] wire io_async_safe_widx_valid_0; // @[AsyncQueue.scala:70:7] wire io_async_safe_source_reset_n_0; // @[AsyncQueue.scala:70:7] wire io_async_widx_0; // @[AsyncQueue.scala:70:7] wire sink_ready; // @[AsyncQueue.scala:81:28] reg mem_0_resumereq; // @[AsyncQueue.scala:82:16] assign io_async_mem_0_resumereq_0 = mem_0_resumereq; // @[AsyncQueue.scala:70:7, :82:16] reg [9:0] mem_0_hartsel; // @[AsyncQueue.scala:82:16] assign io_async_mem_0_hartsel_0 = mem_0_hartsel; // 
@[AsyncQueue.scala:70:7, :82:16] reg mem_0_ackhavereset; // @[AsyncQueue.scala:82:16] assign io_async_mem_0_ackhavereset_0 = mem_0_ackhavereset; // @[AsyncQueue.scala:70:7, :82:16] reg mem_0_hrmask_0; // @[AsyncQueue.scala:82:16] assign io_async_mem_0_hrmask_0_0 = mem_0_hrmask_0; // @[AsyncQueue.scala:70:7, :82:16] wire _widx_T_1 = io_enq_ready_0 & io_enq_valid_0; // @[Decoupled.scala:51:35] wire _widx_T_2 = ~sink_ready; // @[AsyncQueue.scala:81:28, :83:77] wire _widx_incremented_T_2; // @[AsyncQueue.scala:53:23] wire widx_incremented; // @[AsyncQueue.scala:51:27] wire widx = widx_incremented; // @[AsyncQueue.scala:51:27, :54:17] reg widx_widx_bin; // @[AsyncQueue.scala:52:25] wire [1:0] _widx_incremented_T = {1'h0, widx_widx_bin} + {1'h0, _widx_T_1}; // @[Decoupled.scala:51:35] wire _widx_incremented_T_1 = _widx_incremented_T[0]; // @[AsyncQueue.scala:53:43] assign _widx_incremented_T_2 = ~_widx_T_2 & _widx_incremented_T_1; // @[AsyncQueue.scala:53:{23,43}, :83:77] assign widx_incremented = _widx_incremented_T_2; // @[AsyncQueue.scala:51:27, :53:23] wire ridx; // @[ShiftReg.scala:48:24] wire _ready_T = ~ridx; // @[ShiftReg.scala:48:24] wire _ready_T_1 = widx != _ready_T; // @[AsyncQueue.scala:54:17, :85:{34,44}] wire ready = sink_ready & _ready_T_1; // @[AsyncQueue.scala:81:28, :85:{26,34}] reg ready_reg; // @[AsyncQueue.scala:90:56] assign _io_enq_ready_T = ready_reg & sink_ready; // @[AsyncQueue.scala:81:28, :90:56, :91:29] assign io_enq_ready_0 = _io_enq_ready_T; // @[AsyncQueue.scala:70:7, :91:29] reg widx_gray; // @[AsyncQueue.scala:93:55] assign io_async_widx_0 = widx_gray; // @[AsyncQueue.scala:70:7, :93:55] wire _source_valid_0_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46] wire _source_valid_0_reset_T_2 = _source_valid_0_reset_T | _source_valid_0_reset_T_1; // @[AsyncQueue.scala:107:{36,43,46}] wire _source_valid_0_reset_T_3 = _source_valid_0_reset_T_2; // @[AsyncQueue.scala:107:{43,65}] wire _source_valid_1_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46, :108:46] wire _source_valid_1_reset_T_2 = _source_valid_1_reset_T | _source_valid_1_reset_T_1; // @[AsyncQueue.scala:108:{36,43,46}] wire _source_valid_1_reset_T_3 = _source_valid_1_reset_T_2; // @[AsyncQueue.scala:108:{43,65}] wire _sink_extend_reset_T_1 = ~io_async_safe_sink_reset_n_0; // @[AsyncQueue.scala:70:7, :107:46, :109:46] wire _sink_extend_reset_T_2 = _sink_extend_reset_T | _sink_extend_reset_T_1; // @[AsyncQueue.scala:109:{36,43,46}] wire _sink_extend_reset_T_3 = _sink_extend_reset_T_2; // @[AsyncQueue.scala:109:{43,65}] assign _io_async_safe_source_reset_n_T_1 = ~_io_async_safe_source_reset_n_T; // @[AsyncQueue.scala:123:{27,34}] assign io_async_safe_source_reset_n_0 = _io_async_safe_source_reset_n_T_1; // @[AsyncQueue.scala:70:7, :123:27] always @(posedge clock) begin // @[AsyncQueue.scala:70:7] if (_widx_T_1) begin // @[Decoupled.scala:51:35] mem_0_resumereq <= io_enq_bits_resumereq_0; // @[AsyncQueue.scala:70:7, :82:16] mem_0_hartsel <= io_enq_bits_hartsel_0; // @[AsyncQueue.scala:70:7, :82:16] mem_0_ackhavereset <= io_enq_bits_ackhavereset_0; // @[AsyncQueue.scala:70:7, :82:16] mem_0_hrmask_0 <= io_enq_bits_hrmask_0_0; // @[AsyncQueue.scala:70:7, :82:16] end always @(posedge) always @(posedge clock or posedge _widx_T) begin // @[AsyncQueue.scala:70:7, :83:30] if (_widx_T) // @[AsyncQueue.scala:70:7, :83:30] widx_widx_bin <= 1'h0; // @[AsyncQueue.scala:52:25] else // @[AsyncQueue.scala:70:7] widx_widx_bin <= widx_incremented; // 
@[AsyncQueue.scala:51:27, :52:25] end // always @(posedge, posedge) always @(posedge clock or posedge _ready_reg_T) begin // @[AsyncQueue.scala:70:7, :90:35] if (_ready_reg_T) // @[AsyncQueue.scala:70:7, :90:35] ready_reg <= 1'h0; // @[AsyncQueue.scala:90:56] else // @[AsyncQueue.scala:70:7] ready_reg <= ready; // @[AsyncQueue.scala:85:26, :90:56] end // always @(posedge, posedge) always @(posedge clock or posedge _widx_reg_T) begin // @[AsyncQueue.scala:70:7, :93:34] if (_widx_reg_T) // @[AsyncQueue.scala:70:7, :93:34] widx_gray <= 1'h0; // @[AsyncQueue.scala:93:55] else // @[AsyncQueue.scala:70:7] widx_gray <= widx; // @[AsyncQueue.scala:54:17, :93:55] end // always @(posedge, posedge)
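As a point of comparison for the emitted Verilog above, the following is a minimal, hypothetical Chisel sketch of the idea that source-side logic encodes: a depth-1 asynchronous queue source whose single-bit write index is compared against the synchronized read index to derive enqueue readiness. It is not the rocket-chip AsyncQueueSource; the safe-reset handshake, the ShiftReg synchronizers, and gray coding for deeper queues are omitted, and every name below (TinyAsyncSource, io.mem, io.widx, io.ridx) is invented for illustration.

import chisel3._
import chisel3.util._

class TinyAsyncSource[T <: Data](gen: T) extends Module {
  val io = IO(new Bundle {
    val enq  = Flipped(Decoupled(gen))
    val mem  = Output(gen)    // held entry, sampled by the sink clock domain
    val widx = Output(Bool()) // one-bit write index (gray == binary at this width)
    val ridx = Input(Bool())  // read index, already synchronized from the sink
  })
  val data = Reg(gen)
  val widx = RegInit(false.B)
  // Depth-1 occupancy: empty when the indices match, full when they differ.
  io.enq.ready := widx === io.ridx
  when (io.enq.fire) {
    data := io.enq.bits
    widx := !widx
  }
  io.mem  := data
  io.widx := widx
}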
Generate the Verilog code corresponding to the following Chisel files. File InputUnit.scala: package constellation.router import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.{Field, Parameters} import freechips.rocketchip.util._ import constellation.channel._ import constellation.routing.{FlowRoutingBundle} import constellation.noc.{HasNoCParams} class AbstractInputUnitIO( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams], )(implicit val p: Parameters) extends Bundle with HasRouterOutputParams { val nodeId = cParam.destId val router_req = Decoupled(new RouteComputerReq) val router_resp = Input(new RouteComputerResp(outParams, egressParams)) val vcalloc_req = Decoupled(new VCAllocReq(cParam, outParams, egressParams)) val vcalloc_resp = Input(new VCAllocResp(outParams, egressParams)) val out_credit_available = Input(MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) })) val salloc_req = Vec(cParam.destSpeedup, Decoupled(new SwitchAllocReq(outParams, egressParams))) val out = Vec(cParam.destSpeedup, Valid(new SwitchBundle(outParams, egressParams))) val debug = Output(new Bundle { val va_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) val sa_stall = UInt(log2Ceil(cParam.nVirtualChannels).W) }) val block = Input(Bool()) } abstract class AbstractInputUnit( val cParam: BaseChannelParams, val outParams: Seq[ChannelParams], val egressParams: Seq[EgressChannelParams] )(implicit val p: Parameters) extends Module with HasRouterOutputParams with HasNoCParams { val nodeId = cParam.destId def io: AbstractInputUnitIO } class InputBuffer(cParam: ChannelParams)(implicit p: Parameters) extends Module { val nVirtualChannels = cParam.nVirtualChannels val io = IO(new Bundle { val enq = Flipped(Vec(cParam.srcSpeedup, Valid(new Flit(cParam.payloadBits)))) val deq = Vec(cParam.nVirtualChannels, Decoupled(new BaseFlit(cParam.payloadBits))) }) val useOutputQueues = cParam.useOutputQueues val delims = if (useOutputQueues) { cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize else 0).scanLeft(0)(_+_) } else { // If no queuing, have to add an additional slot since head == tail implies empty // TODO this should be fixed, should use all slots available cParam.virtualChannelParams.map(u => if (u.traversable) u.bufferSize + 1 else 0).scanLeft(0)(_+_) } val starts = delims.dropRight(1).zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val ends = delims.tail.zipWithIndex.map { case (s,i) => if (cParam.virtualChannelParams(i).traversable) s else 0 } val fullSize = delims.last // Ugly case. 
Use multiple queues if ((cParam.srcSpeedup > 1 || cParam.destSpeedup > 1 || fullSize <= 1) || !cParam.unifiedBuffer) { require(useOutputQueues) val qs = cParam.virtualChannelParams.map(v => Module(new Queue(new BaseFlit(cParam.payloadBits), v.bufferSize))) qs.zipWithIndex.foreach { case (q,i) => val sel = io.enq.map(f => f.valid && f.bits.virt_channel_id === i.U) q.io.enq.valid := sel.orR q.io.enq.bits.head := Mux1H(sel, io.enq.map(_.bits.head)) q.io.enq.bits.tail := Mux1H(sel, io.enq.map(_.bits.tail)) q.io.enq.bits.payload := Mux1H(sel, io.enq.map(_.bits.payload)) io.deq(i) <> q.io.deq } } else { val mem = Mem(fullSize, new BaseFlit(cParam.payloadBits)) val heads = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val tails = RegInit(VecInit(starts.map(_.U(log2Ceil(fullSize).W)))) val empty = (heads zip tails).map(t => t._1 === t._2) val qs = Seq.fill(nVirtualChannels) { Module(new Queue(new BaseFlit(cParam.payloadBits), 1, pipe=true)) } qs.foreach(_.io.enq.valid := false.B) qs.foreach(_.io.enq.bits := DontCare) val vc_sel = UIntToOH(io.enq(0).bits.virt_channel_id) val flit = Wire(new BaseFlit(cParam.payloadBits)) val direct_to_q = (Mux1H(vc_sel, qs.map(_.io.enq.ready)) && Mux1H(vc_sel, empty)) && useOutputQueues.B flit.head := io.enq(0).bits.head flit.tail := io.enq(0).bits.tail flit.payload := io.enq(0).bits.payload when (io.enq(0).valid && !direct_to_q) { val tail = tails(io.enq(0).bits.virt_channel_id) mem.write(tail, flit) tails(io.enq(0).bits.virt_channel_id) := Mux( tail === Mux1H(vc_sel, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(vc_sel, starts.map(_.U)), tail + 1.U) } .elsewhen (io.enq(0).valid && direct_to_q) { for (i <- 0 until nVirtualChannels) { when (io.enq(0).bits.virt_channel_id === i.U) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := flit } } } if (useOutputQueues) { val can_to_q = (0 until nVirtualChannels).map { i => !empty(i) && qs(i).io.enq.ready } val to_q_oh = PriorityEncoderOH(can_to_q) val to_q = OHToUInt(to_q_oh) when (can_to_q.orR) { val head = Mux1H(to_q_oh, heads) heads(to_q) := Mux( head === Mux1H(to_q_oh, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(to_q_oh, starts.map(_.U)), head + 1.U) for (i <- 0 until nVirtualChannels) { when (to_q_oh(i)) { qs(i).io.enq.valid := true.B qs(i).io.enq.bits := mem.read(head) } } } for (i <- 0 until nVirtualChannels) { io.deq(i) <> qs(i).io.deq } } else { qs.map(_.io.deq.ready := false.B) val ready_sel = io.deq.map(_.ready) val fire = io.deq.map(_.fire) assert(PopCount(fire) <= 1.U) val head = Mux1H(fire, heads) when (fire.orR) { val fire_idx = OHToUInt(fire) heads(fire_idx) := Mux( head === Mux1H(fire, ends.map(_ - 1).map(_ max 0).map(_.U)), Mux1H(fire, starts.map(_.U)), head + 1.U) } val read_flit = mem.read(head) for (i <- 0 until nVirtualChannels) { io.deq(i).valid := !empty(i) io.deq(i).bits := read_flit } } } } class InputUnit(cParam: ChannelParams, outParams: Seq[ChannelParams], egressParams: Seq[EgressChannelParams], combineRCVA: Boolean, combineSAST: Boolean ) (implicit p: Parameters) extends AbstractInputUnit(cParam, outParams, egressParams)(p) { val nVirtualChannels = cParam.nVirtualChannels val virtualChannelParams = cParam.virtualChannelParams class InputUnitIO extends AbstractInputUnitIO(cParam, outParams, egressParams) { val in = Flipped(new Channel(cParam.asInstanceOf[ChannelParams])) } val io = IO(new InputUnitIO) val g_i :: g_r :: g_v :: g_a :: g_c :: Nil = Enum(5) class InputState extends Bundle { val g = UInt(3.W) val vc_sel = MixedVec(allOutParams.map { u => Vec(u.nVirtualChannels, Bool()) 
}) val flow = new FlowRoutingBundle val fifo_deps = UInt(nVirtualChannels.W) } val input_buffer = Module(new InputBuffer(cParam)) for (i <- 0 until cParam.srcSpeedup) { input_buffer.io.enq(i) := io.in.flit(i) } input_buffer.io.deq.foreach(_.ready := false.B) val route_arbiter = Module(new Arbiter( new RouteComputerReq, nVirtualChannels )) io.router_req <> route_arbiter.io.out val states = Reg(Vec(nVirtualChannels, new InputState)) val anyFifo = cParam.possibleFlows.map(_.fifo).reduce(_||_) val allFifo = cParam.possibleFlows.map(_.fifo).reduce(_&&_) if (anyFifo) { val idle_mask = VecInit(states.map(_.g === g_i)).asUInt for (s <- states) for (i <- 0 until nVirtualChannels) s.fifo_deps := s.fifo_deps & ~idle_mask } for (i <- 0 until cParam.srcSpeedup) { when (io.in.flit(i).fire && io.in.flit(i).bits.head) { val id = io.in.flit(i).bits.virt_channel_id assert(id < nVirtualChannels.U) assert(states(id).g === g_i) val at_dest = io.in.flit(i).bits.flow.egress_node === nodeId.U states(id).g := Mux(at_dest, g_v, g_r) states(id).vc_sel.foreach(_.foreach(_ := false.B)) for (o <- 0 until nEgress) { when (o.U === io.in.flit(i).bits.flow.egress_node_id) { states(id).vc_sel(o+nOutputs)(0) := true.B } } states(id).flow := io.in.flit(i).bits.flow if (anyFifo) { val fifo = cParam.possibleFlows.filter(_.fifo).map(_.isFlow(io.in.flit(i).bits.flow)).toSeq.orR states(id).fifo_deps := VecInit(states.zipWithIndex.map { case (s, j) => s.g =/= g_i && s.flow.asUInt === io.in.flit(i).bits.flow.asUInt && j.U =/= id }).asUInt } } } (route_arbiter.io.in zip states).zipWithIndex.map { case ((i,s),idx) => if (virtualChannelParams(idx).traversable) { i.valid := s.g === g_r i.bits.flow := s.flow i.bits.src_virt_id := idx.U when (i.fire) { s.g := g_v } } else { i.valid := false.B i.bits := DontCare } } when (io.router_req.fire) { val id = io.router_req.bits.src_virt_id assert(states(id).g === g_r) states(id).g := g_v for (i <- 0 until nVirtualChannels) { when (i.U === id) { states(i).vc_sel := io.router_resp.vc_sel } } } val mask = RegInit(0.U(nVirtualChannels.W)) val vcalloc_reqs = Wire(Vec(nVirtualChannels, new VCAllocReq(cParam, outParams, egressParams))) val vcalloc_vals = Wire(Vec(nVirtualChannels, Bool())) val vcalloc_filter = PriorityEncoderOH(Cat(vcalloc_vals.asUInt, vcalloc_vals.asUInt & ~mask)) val vcalloc_sel = vcalloc_filter(nVirtualChannels-1,0) | (vcalloc_filter >> nVirtualChannels) // Prioritize incoming packetes when (io.router_req.fire) { mask := (1.U << io.router_req.bits.src_virt_id) - 1.U } .elsewhen (vcalloc_vals.orR) { mask := Mux1H(vcalloc_sel, (0 until nVirtualChannels).map { w => ~(0.U((w+1).W)) }) } io.vcalloc_req.valid := vcalloc_vals.orR io.vcalloc_req.bits := Mux1H(vcalloc_sel, vcalloc_reqs) states.zipWithIndex.map { case (s,idx) => if (virtualChannelParams(idx).traversable) { vcalloc_vals(idx) := s.g === g_v && s.fifo_deps === 0.U vcalloc_reqs(idx).in_vc := idx.U vcalloc_reqs(idx).vc_sel := s.vc_sel vcalloc_reqs(idx).flow := s.flow when (vcalloc_vals(idx) && vcalloc_sel(idx) && io.vcalloc_req.ready) { s.g := g_a } if (combineRCVA) { when (route_arbiter.io.in(idx).fire) { vcalloc_vals(idx) := true.B vcalloc_reqs(idx).vc_sel := io.router_resp.vc_sel } } } else { vcalloc_vals(idx) := false.B vcalloc_reqs(idx) := DontCare } } io.debug.va_stall := PopCount(vcalloc_vals) - io.vcalloc_req.ready when (io.vcalloc_req.fire) { for (i <- 0 until nVirtualChannels) { when (vcalloc_sel(i)) { states(i).vc_sel := io.vcalloc_resp.vc_sel states(i).g := g_a if (!combineRCVA) { assert(states(i).g === g_v) } } } } val 
salloc_arb = Module(new SwitchArbiter( nVirtualChannels, cParam.destSpeedup, outParams, egressParams )) (states zip salloc_arb.io.in).zipWithIndex.map { case ((s,r),i) => if (virtualChannelParams(i).traversable) { val credit_available = (s.vc_sel.asUInt & io.out_credit_available.asUInt) =/= 0.U r.valid := s.g === g_a && credit_available && input_buffer.io.deq(i).valid r.bits.vc_sel := s.vc_sel val deq_tail = input_buffer.io.deq(i).bits.tail r.bits.tail := deq_tail when (r.fire && deq_tail) { s.g := g_i } input_buffer.io.deq(i).ready := r.ready } else { r.valid := false.B r.bits := DontCare } } io.debug.sa_stall := PopCount(salloc_arb.io.in.map(r => r.valid && !r.ready)) io.salloc_req <> salloc_arb.io.out when (io.block) { salloc_arb.io.out.foreach(_.ready := false.B) io.salloc_req.foreach(_.valid := false.B) } class OutBundle extends Bundle { val valid = Bool() val vid = UInt(virtualChannelBits.W) val out_vid = UInt(log2Up(allOutParams.map(_.nVirtualChannels).max).W) val flit = new Flit(cParam.payloadBits) } val salloc_outs = if (combineSAST) { Wire(Vec(cParam.destSpeedup, new OutBundle)) } else { Reg(Vec(cParam.destSpeedup, new OutBundle)) } io.in.credit_return := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire, salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) io.in.vc_free := salloc_arb.io.out.zipWithIndex.map { case (o, i) => Mux(o.fire && Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)), salloc_arb.io.chosen_oh(i), 0.U) }.reduce(_|_) for (i <- 0 until cParam.destSpeedup) { val salloc_out = salloc_outs(i) salloc_out.valid := salloc_arb.io.out(i).fire salloc_out.vid := OHToUInt(salloc_arb.io.chosen_oh(i)) val vc_sel = Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.vc_sel)) val channel_oh = vc_sel.map(_.reduce(_||_)).toSeq val virt_channel = Mux1H(channel_oh, vc_sel.map(v => OHToUInt(v)).toSeq) when (salloc_arb.io.out(i).fire) { salloc_out.out_vid := virt_channel salloc_out.flit.payload := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.payload)) salloc_out.flit.head := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.head)) salloc_out.flit.tail := Mux1H(salloc_arb.io.chosen_oh(i), input_buffer.io.deq.map(_.bits.tail)) salloc_out.flit.flow := Mux1H(salloc_arb.io.chosen_oh(i), states.map(_.flow)) } .otherwise { salloc_out.out_vid := DontCare salloc_out.flit := DontCare } salloc_out.flit.virt_channel_id := DontCare // this gets set in the switch io.out(i).valid := salloc_out.valid io.out(i).bits.flit := salloc_out.flit io.out(i).bits.out_virt_channel := salloc_out.out_vid } def filterVCSel(sel: MixedVec[Vec[Bool]], srcV: Int) = { if (virtualChannelParams(srcV).traversable) { outParams.zipWithIndex.map { case (oP, oI) => (0 until oP.nVirtualChannels).map { oV => var allow = false virtualChannelParams(srcV).possibleFlows.foreach { pI => allow = allow || routingRelation( cParam.channelRoutingInfos(srcV), oP.channelRoutingInfos(oV), pI ) } if (!allow) sel(oI)(oV) := false.B } } } } (0 until nVirtualChannels).map { i => if (!virtualChannelParams(i).traversable) states(i) := DontCare filterVCSel(states(i).vc_sel, i) } when (reset.asBool) { states.foreach(_.g := g_i) } }
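Before the emitted Verilog, a hedged aside on one non-obvious construct in the Chisel above: vcalloc_filter concatenates the request vector with a copy masked by the last grant and priority-encodes the doubled-width result, which yields a round-robin pick that prefers requesters strictly above the previous winner and wraps around when none remain. The standalone sketch below illustrates only that trick, under invented names (RoundRobinPick, io.req, io.fire, io.grant); it is not part of the constellation sources.

import chisel3._
import chisel3.util._

class RoundRobinPick(n: Int) extends Module {
  val io = IO(new Bundle {
    val req   = Input(UInt(n.W))   // one request bit per candidate
    val fire  = Input(Bool())      // advance the pointer when the grant is consumed
    val grant = Output(UInt(n.W))  // one-hot pick
  })
  val mask = RegInit(0.U(n.W))     // bits at or below the previous grant
  // Low half: requests above the pointer; high half: all requests (wrap-around fallback).
  val filter = PriorityEncoderOH(Cat(io.req, io.req & ~mask))
  val grant  = filter(n - 1, 0) | (filter >> n)
  io.grant := grant
  when (io.fire && io.req.orR) {
    // Same effect as the Mux1H mask update above: set all bits at or below the grant.
    mask := grant | (grant - 1.U)
  }
}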
module InputUnit_31( // @[InputUnit.scala:158:7] input clock, // @[InputUnit.scala:158:7] input reset, // @[InputUnit.scala:158:7] output [1:0] io_router_req_bits_src_virt_id, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_vnet_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_router_req_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_router_req_bits_flow_egress_node, // @[InputUnit.scala:170:14] output [1:0] io_router_req_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_3_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_2_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_1_2, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_0, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_1, // @[InputUnit.scala:170:14] input io_router_resp_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_vcalloc_req_ready, // @[InputUnit.scala:170:14] output io_vcalloc_req_valid, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_5_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_4_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_3_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_2_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_0, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_1, // @[InputUnit.scala:170:14] output io_vcalloc_req_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_5_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_4_0, // @[InputUnit.scala:170:14] input io_vcalloc_resp_vc_sel_3_0, // @[InputUnit.scala:170:14] input io_out_credit_available_5_0, // @[InputUnit.scala:170:14] input io_out_credit_available_4_0, // @[InputUnit.scala:170:14] input io_out_credit_available_3_0, // @[InputUnit.scala:170:14] input io_out_credit_available_2_0, // @[InputUnit.scala:170:14] input io_out_credit_available_1_0, // @[InputUnit.scala:170:14] input io_out_credit_available_1_1, // @[InputUnit.scala:170:14] input io_out_credit_available_1_2, // @[InputUnit.scala:170:14] input io_out_credit_available_0_0, // @[InputUnit.scala:170:14] input io_out_credit_available_0_1, // @[InputUnit.scala:170:14] input io_out_credit_available_0_2, // @[InputUnit.scala:170:14] input io_salloc_req_0_ready, // @[InputUnit.scala:170:14] output io_salloc_req_0_valid, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_5_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_4_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_0, // 
@[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_3_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_2_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_1_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_0, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_1, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_vc_sel_0_2, // @[InputUnit.scala:170:14] output io_salloc_req_0_bits_tail, // @[InputUnit.scala:170:14] output io_out_0_valid, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_head, // @[InputUnit.scala:170:14] output io_out_0_bits_flit_tail, // @[InputUnit.scala:170:14] output [144:0] io_out_0_bits_flit_payload, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_vnet_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_ingress_node, // @[InputUnit.scala:170:14] output [2:0] io_out_0_bits_flit_flow_ingress_node_id, // @[InputUnit.scala:170:14] output [3:0] io_out_0_bits_flit_flow_egress_node, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_flit_flow_egress_node_id, // @[InputUnit.scala:170:14] output [1:0] io_out_0_bits_out_virt_channel, // @[InputUnit.scala:170:14] output [1:0] io_debug_va_stall, // @[InputUnit.scala:170:14] output [1:0] io_debug_sa_stall, // @[InputUnit.scala:170:14] input io_in_flit_0_valid, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_head, // @[InputUnit.scala:170:14] input io_in_flit_0_bits_tail, // @[InputUnit.scala:170:14] input [144:0] io_in_flit_0_bits_payload, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_vnet_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_ingress_node, // @[InputUnit.scala:170:14] input [2:0] io_in_flit_0_bits_flow_ingress_node_id, // @[InputUnit.scala:170:14] input [3:0] io_in_flit_0_bits_flow_egress_node, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_flow_egress_node_id, // @[InputUnit.scala:170:14] input [1:0] io_in_flit_0_bits_virt_channel_id, // @[InputUnit.scala:170:14] output [2:0] io_in_credit_return, // @[InputUnit.scala:170:14] output [2:0] io_in_vc_free // @[InputUnit.scala:170:14] ); wire _GEN; // @[MixedVec.scala:116:9] wire _GEN_0; // @[MixedVec.scala:116:9] wire vcalloc_vals_2; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _GEN_1; // @[MixedVec.scala:116:9] wire _GEN_2; // @[MixedVec.scala:116:9] wire vcalloc_vals_1; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _GEN_3; // @[MixedVec.scala:116:9] wire _GEN_4; // @[MixedVec.scala:116:9] wire vcalloc_reqs_0_vc_sel_3_0; // @[MixedVec.scala:116:9] wire vcalloc_vals_0; // @[InputUnit.scala:266:25, :272:46, :273:29] wire _salloc_arb_io_in_0_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_1_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_in_2_ready; // @[InputUnit.scala:296:26] wire _salloc_arb_io_out_0_valid; // @[InputUnit.scala:296:26] wire [2:0] _salloc_arb_io_chosen_oh_0; // @[InputUnit.scala:296:26] wire _route_arbiter_io_in_1_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_in_2_ready; // @[InputUnit.scala:187:29] wire _route_arbiter_io_out_valid; // @[InputUnit.scala:187:29] wire [1:0] 
_route_arbiter_io_out_bits_src_virt_id; // @[InputUnit.scala:187:29] wire _input_buffer_io_deq_0_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_0_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_0_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_1_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_1_bits_payload; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_valid; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_head; // @[InputUnit.scala:181:28] wire _input_buffer_io_deq_2_bits_tail; // @[InputUnit.scala:181:28] wire [144:0] _input_buffer_io_deq_2_bits_payload; // @[InputUnit.scala:181:28] reg [2:0] states_0_g; // @[InputUnit.scala:192:19] reg states_0_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_4_0; // @[InputUnit.scala:192:19] reg states_0_vc_sel_3_0; // @[InputUnit.scala:192:19] reg [1:0] states_0_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_0_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_0_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_0_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_0_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_1_g; // @[InputUnit.scala:192:19] reg states_1_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_1_vc_sel_4_0; // @[InputUnit.scala:192:19] reg [1:0] states_1_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_1_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_1_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_1_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_1_flow_egress_node_id; // @[InputUnit.scala:192:19] reg [2:0] states_2_g; // @[InputUnit.scala:192:19] reg states_2_vc_sel_5_0; // @[InputUnit.scala:192:19] reg states_2_vc_sel_4_0; // @[InputUnit.scala:192:19] reg [1:0] states_2_flow_vnet_id; // @[InputUnit.scala:192:19] reg [3:0] states_2_flow_ingress_node; // @[InputUnit.scala:192:19] reg [2:0] states_2_flow_ingress_node_id; // @[InputUnit.scala:192:19] reg [3:0] states_2_flow_egress_node; // @[InputUnit.scala:192:19] reg [1:0] states_2_flow_egress_node_id; // @[InputUnit.scala:192:19] wire _GEN_5 = io_in_flit_0_valid & io_in_flit_0_bits_head; // @[InputUnit.scala:205:30] wire route_arbiter_io_in_0_valid = states_0_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire route_arbiter_io_in_1_valid = states_1_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire _GEN_6 = _route_arbiter_io_in_1_ready & route_arbiter_io_in_1_valid; // @[Decoupled.scala:51:35] wire route_arbiter_io_in_2_valid = states_2_g == 3'h1; // @[InputUnit.scala:158:7, :192:19, :229:22] wire _GEN_7 = _route_arbiter_io_in_2_ready & route_arbiter_io_in_2_valid; // @[Decoupled.scala:51:35]
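The InputBuffer in the Chisel above falls back to one Queue per virtual channel when the unified buffer cannot be used; the self-contained sketch below shows only that per-VC steering, under invented names (MiniFlit, PerVCInputBuffer), and is a simplification rather than the constellation implementation. Enqueue is a Valid interface with no ready signal because, as in the original, upstream credit-based flow control is assumed to guarantee space in the selected queue.

import chisel3._
import chisel3.util._

class MiniFlit(val payloadBits: Int, val nVC: Int) extends Bundle {
  val head            = Bool()
  val tail            = Bool()
  val payload         = UInt(payloadBits.W)
  val virt_channel_id = UInt(log2Ceil(nVC).W)
}

class PerVCInputBuffer(nVC: Int, depth: Int, payloadBits: Int) extends Module {
  val io = IO(new Bundle {
    val enq = Flipped(Valid(new MiniFlit(payloadBits, nVC)))
    val deq = Vec(nVC, Decoupled(new MiniFlit(payloadBits, nVC)))
  })
  val qs = Seq.fill(nVC)(Module(new Queue(new MiniFlit(payloadBits, nVC), depth)))
  for ((q, i) <- qs.zipWithIndex) {
    // Only the queue whose index matches the incoming flit's VC id sees a valid enqueue.
    q.io.enq.valid := io.enq.valid && io.enq.bits.virt_channel_id === i.U
    q.io.enq.bits  := io.enq.bits
    io.deq(i) <> q.io.deq
  }
}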
Generate the Verilog code corresponding to the following Chisel files. File util.scala: //****************************************************************************** // Copyright (c) 2015 - 2019, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Utility Functions //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.util import chisel3._ import chisel3.util._ import freechips.rocketchip.rocket.Instructions._ import freechips.rocketchip.rocket._ import freechips.rocketchip.util.{Str} import org.chipsalliance.cde.config.{Parameters} import freechips.rocketchip.tile.{TileKey} import boom.v3.common.{MicroOp} import boom.v3.exu.{BrUpdateInfo} /** * Object to XOR fold a input register of fullLength into a compressedLength. */ object Fold { def apply(input: UInt, compressedLength: Int, fullLength: Int): UInt = { val clen = compressedLength val hlen = fullLength if (hlen <= clen) { input } else { var res = 0.U(clen.W) var remaining = input.asUInt for (i <- 0 to hlen-1 by clen) { val len = if (i + clen > hlen ) (hlen - i) else clen require(len > 0) res = res(clen-1,0) ^ remaining(len-1,0) remaining = remaining >> len.U } res } } } /** * Object to check if MicroOp was killed due to a branch mispredict. * Uses "Fast" branch masks */ object IsKilledByBranch { def apply(brupdate: BrUpdateInfo, uop: MicroOp): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop.br_mask) } def apply(brupdate: BrUpdateInfo, uop_mask: UInt): Bool = { return maskMatch(brupdate.b1.mispredict_mask, uop_mask) } } /** * Object to return new MicroOp with a new BR mask given a MicroOp mask * and old BR mask. */ object GetNewUopAndBrMask { def apply(uop: MicroOp, brupdate: BrUpdateInfo) (implicit p: Parameters): MicroOp = { val newuop = WireInit(uop) newuop.br_mask := uop.br_mask & ~brupdate.b1.resolve_mask newuop } } /** * Object to return a BR mask given a MicroOp mask and old BR mask. 
*/ object GetNewBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): UInt = { return uop.br_mask & ~brupdate.b1.resolve_mask } def apply(brupdate: BrUpdateInfo, br_mask: UInt): UInt = { return br_mask & ~brupdate.b1.resolve_mask } } object UpdateBrMask { def apply(brupdate: BrUpdateInfo, uop: MicroOp): MicroOp = { val out = WireInit(uop) out.br_mask := GetNewBrMask(brupdate, uop) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: T): T = { val out = WireInit(bundle) out.uop.br_mask := GetNewBrMask(brupdate, bundle.uop.br_mask) out } def apply[T <: boom.v3.common.HasBoomUOP](brupdate: BrUpdateInfo, bundle: Valid[T]): Valid[T] = { val out = WireInit(bundle) out.bits.uop.br_mask := GetNewBrMask(brupdate, bundle.bits.uop.br_mask) out.valid := bundle.valid && !IsKilledByBranch(brupdate, bundle.bits.uop.br_mask) out } } /** * Object to check if at least 1 bit matches in two masks */ object maskMatch { def apply(msk1: UInt, msk2: UInt): Bool = (msk1 & msk2) =/= 0.U } /** * Object to clear one bit in a mask given an index */ object clearMaskBit { def apply(msk: UInt, idx: UInt): UInt = (msk & ~(1.U << idx))(msk.getWidth-1, 0) } /** * Object to shift a register over by one bit and concat a new one */ object PerformShiftRegister { def apply(reg_val: UInt, new_bit: Bool): UInt = { reg_val := Cat(reg_val(reg_val.getWidth-1, 0).asUInt, new_bit.asUInt).asUInt reg_val } } /** * Object to shift a register over by one bit, wrapping the top bit around to the bottom * (XOR'ed with a new-bit), and evicting a bit at index HLEN. * This is used to simulate a longer HLEN-width shift register that is folded * down to a compressed CLEN. */ object PerformCircularShiftRegister { def apply(csr: UInt, new_bit: Bool, evict_bit: Bool, hlen: Int, clen: Int): UInt = { val carry = csr(clen-1) val newval = Cat(csr, new_bit ^ carry) ^ (evict_bit << (hlen % clen).U) newval } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapAdd { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, amt: UInt, n: Int): UInt = { if (isPow2(n)) { (value + amt)(log2Ceil(n)-1,0) } else { val sum = Cat(0.U(1.W), value) + Cat(0.U(1.W), amt) Mux(sum >= n.U, sum - n.U, sum) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapSub { // "n" is the number of increments, so we wrap to n-1. def apply(value: UInt, amt: Int, n: Int): UInt = { if (isPow2(n)) { (value - amt.U)(log2Ceil(n)-1,0) } else { val v = Cat(0.U(1.W), value) val b = Cat(0.U(1.W), amt.U) Mux(value >= amt.U, value - amt.U, n.U - amt.U + value) } } } /** * Object to increment an input value, wrapping it if * necessary. */ object WrapInc { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value + 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === (n-1).U) Mux(wrap, 0.U, value + 1.U) } } } /** * Object to decrement an input value, wrapping it if * necessary. */ object WrapDec { // "n" is the number of increments, so we wrap at n-1. def apply(value: UInt, n: Int): UInt = { if (isPow2(n)) { (value - 1.U)(log2Ceil(n)-1,0) } else { val wrap = (value === 0.U) Mux(wrap, (n-1).U, value - 1.U) } } } /** * Object to mask off lower bits of a PC to align to a "b" * Byte boundary. */ object AlignPCToBoundary { def apply(pc: UInt, b: Int): UInt = { // Invert for scenario where pc longer than b // (which would clear all bits above size(b)). 
~(~pc | (b-1).U) } } /** * Object to rotate a signal left by one */ object RotateL1 { def apply(signal: UInt): UInt = { val w = signal.getWidth val out = Cat(signal(w-2,0), signal(w-1)) return out } } /** * Object to sext a value to a particular length. */ object Sext { def apply(x: UInt, length: Int): UInt = { if (x.getWidth == length) return x else return Cat(Fill(length-x.getWidth, x(x.getWidth-1)), x) } } /** * Object to translate from BOOM's special "packed immediate" to a 32b signed immediate * Asking for U-type gives it shifted up 12 bits. */ object ImmGen { import boom.v3.common.{LONGEST_IMM_SZ, IS_B, IS_I, IS_J, IS_S, IS_U} def apply(ip: UInt, isel: UInt): SInt = { val sign = ip(LONGEST_IMM_SZ-1).asSInt val i30_20 = Mux(isel === IS_U, ip(18,8).asSInt, sign) val i19_12 = Mux(isel === IS_U || isel === IS_J, ip(7,0).asSInt, sign) val i11 = Mux(isel === IS_U, 0.S, Mux(isel === IS_J || isel === IS_B, ip(8).asSInt, sign)) val i10_5 = Mux(isel === IS_U, 0.S, ip(18,14).asSInt) val i4_1 = Mux(isel === IS_U, 0.S, ip(13,9).asSInt) val i0 = Mux(isel === IS_S || isel === IS_I, ip(8).asSInt, 0.S) return Cat(sign, i30_20, i19_12, i11, i10_5, i4_1, i0).asSInt } } /** * Object to get the FP rounding mode out of a packed immediate. */ object ImmGenRm { def apply(ip: UInt): UInt = { return ip(2,0) } } /** * Object to get the FP function fype from a packed immediate. * Note: only works if !(IS_B or IS_S) */ object ImmGenTyp { def apply(ip: UInt): UInt = { return ip(9,8) } } /** * Object to see if an instruction is a JALR. */ object DebugIsJALR { def apply(inst: UInt): Bool = { // TODO Chisel not sure why this won't compile // val is_jalr = rocket.DecodeLogic(inst, List(Bool(false)), // Array( // JALR -> Bool(true))) inst(6,0) === "b1100111".U } } /** * Object to take an instruction and output its branch or jal target. Only used * for a debug assert (no where else would we jump straight from instruction * bits to a target). */ object DebugGetBJImm { def apply(inst: UInt): UInt = { // TODO Chisel not sure why this won't compile //val csignals = //rocket.DecodeLogic(inst, // List(Bool(false), Bool(false)), // Array( // BEQ -> List(Bool(true ), Bool(false)), // BNE -> List(Bool(true ), Bool(false)), // BGE -> List(Bool(true ), Bool(false)), // BGEU -> List(Bool(true ), Bool(false)), // BLT -> List(Bool(true ), Bool(false)), // BLTU -> List(Bool(true ), Bool(false)) // )) //val is_br :: nothing :: Nil = csignals val is_br = (inst(6,0) === "b1100011".U) val br_targ = Cat(Fill(12, inst(31)), Fill(8,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) val jal_targ= Cat(Fill(12, inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) Mux(is_br, br_targ, jal_targ) } } /** * Object to return the lowest bit position after the head. */ object AgePriorityEncoder { def apply(in: Seq[Bool], head: UInt): UInt = { val n = in.size val width = log2Ceil(in.size) val n_padded = 1 << width val temp_vec = (0 until n_padded).map(i => if (i < n) in(i) && i.U >= head else false.B) ++ in val idx = PriorityEncoder(temp_vec) idx(width-1, 0) //discard msb } } /** * Object to determine whether queue * index i0 is older than index i1. */ object IsOlder { def apply(i0: UInt, i1: UInt, head: UInt) = ((i0 < i1) ^ (i0 < head) ^ (i1 < head)) } /** * Set all bits at or below the highest order '1'. */ object MaskLower { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => in >> i.U).reduce(_|_) } } /** * Set all bits at or above the lowest order '1'. 
*/ object MaskUpper { def apply(in: UInt) = { val n = in.getWidth (0 until n).map(i => (in << i.U)(n-1,0)).reduce(_|_) } } /** * Transpose a matrix of Chisel Vecs. */ object Transpose { def apply[T <: chisel3.Data](in: Vec[Vec[T]]) = { val n = in(0).size VecInit((0 until n).map(i => VecInit(in.map(row => row(i))))) } } /** * N-wide one-hot priority encoder. */ object SelectFirstN { def apply(in: UInt, n: Int) = { val sels = Wire(Vec(n, UInt(in.getWidth.W))) var mask = in for (i <- 0 until n) { sels(i) := PriorityEncoderOH(mask) mask = mask & ~sels(i) } sels } } /** * Connect the first k of n valid input interfaces to k output interfaces. */ class Compactor[T <: chisel3.Data](n: Int, k: Int, gen: T) extends Module { require(n >= k) val io = IO(new Bundle { val in = Vec(n, Flipped(DecoupledIO(gen))) val out = Vec(k, DecoupledIO(gen)) }) if (n == k) { io.out <> io.in } else { val counts = io.in.map(_.valid).scanLeft(1.U(k.W)) ((c,e) => Mux(e, (c<<1)(k-1,0), c)) val sels = Transpose(VecInit(counts map (c => VecInit(c.asBools)))) map (col => (col zip io.in.map(_.valid)) map {case (c,v) => c && v}) val in_readys = counts map (row => (row.asBools zip io.out.map(_.ready)) map {case (c,r) => c && r} reduce (_||_)) val out_valids = sels map (col => col.reduce(_||_)) val out_data = sels map (s => Mux1H(s, io.in.map(_.bits))) in_readys zip io.in foreach {case (r,i) => i.ready := r} out_valids zip out_data zip io.out foreach {case ((v,d),o) => o.valid := v; o.bits := d} } } /** * Create a queue that can be killed with a branch kill signal. * Assumption: enq.valid only high if not killed by branch (so don't check IsKilled on io.enq). */ class BranchKillableQueue[T <: boom.v3.common.HasBoomUOP](gen: T, entries: Int, flush_fn: boom.v3.common.MicroOp => Bool = u => true.B, flow: Boolean = true) (implicit p: org.chipsalliance.cde.config.Parameters) extends boom.v3.common.BoomModule()(p) with boom.v3.common.HasBoomCoreParameters { val io = IO(new Bundle { val enq = Flipped(Decoupled(gen)) val deq = Decoupled(gen) val brupdate = Input(new BrUpdateInfo()) val flush = Input(Bool()) val empty = Output(Bool()) val count = Output(UInt(log2Ceil(entries).W)) }) val ram = Mem(entries, gen) val valids = RegInit(VecInit(Seq.fill(entries) {false.B})) val uops = Reg(Vec(entries, new MicroOp)) val enq_ptr = Counter(entries) val deq_ptr = Counter(entries) val maybe_full = RegInit(false.B) val ptr_match = enq_ptr.value === deq_ptr.value io.empty := ptr_match && !maybe_full val full = ptr_match && maybe_full val do_enq = WireInit(io.enq.fire) val do_deq = WireInit((io.deq.ready || !valids(deq_ptr.value)) && !io.empty) for (i <- 0 until entries) { val mask = uops(i).br_mask val uop = uops(i) valids(i) := valids(i) && !IsKilledByBranch(io.brupdate, mask) && !(io.flush && flush_fn(uop)) when (valids(i)) { uops(i).br_mask := GetNewBrMask(io.brupdate, mask) } } when (do_enq) { ram(enq_ptr.value) := io.enq.bits valids(enq_ptr.value) := true.B //!IsKilledByBranch(io.brupdate, io.enq.bits.uop) uops(enq_ptr.value) := io.enq.bits.uop uops(enq_ptr.value).br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) enq_ptr.inc() } when (do_deq) { valids(deq_ptr.value) := false.B deq_ptr.inc() } when (do_enq =/= do_deq) { maybe_full := do_enq } io.enq.ready := !full val out = Wire(gen) out := ram(deq_ptr.value) out.uop := uops(deq_ptr.value) io.deq.valid := !io.empty && valids(deq_ptr.value) && !IsKilledByBranch(io.brupdate, out.uop) && !(io.flush && flush_fn(out.uop)) io.deq.bits := out io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, 
out.uop) // For flow queue behavior. if (flow) { when (io.empty) { io.deq.valid := io.enq.valid //&& !IsKilledByBranch(io.brupdate, io.enq.bits.uop) io.deq.bits := io.enq.bits io.deq.bits.uop.br_mask := GetNewBrMask(io.brupdate, io.enq.bits.uop) do_deq := false.B when (io.deq.ready) { do_enq := false.B } } } private val ptr_diff = enq_ptr.value - deq_ptr.value if (isPow2(entries)) { io.count := Cat(maybe_full && ptr_match, ptr_diff) } else { io.count := Mux(ptr_match, Mux(maybe_full, entries.asUInt, 0.U), Mux(deq_ptr.value > enq_ptr.value, entries.asUInt + ptr_diff, ptr_diff)) } } // ------------------------------------------ // Printf helper functions // ------------------------------------------ object BoolToChar { /** * Take in a Chisel Bool and convert it into a Str * based on the Chars given * * @param c_bool Chisel Bool * @param trueChar Scala Char if bool is true * @param falseChar Scala Char if bool is false * @return UInt ASCII Char for "trueChar" or "falseChar" */ def apply(c_bool: Bool, trueChar: Char, falseChar: Char = '-'): UInt = { Mux(c_bool, Str(trueChar), Str(falseChar)) } } object CfiTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param cfi_type specific cfi type * @return Vec of Strs (must be indexed to get specific char) */ def apply(cfi_type: UInt) = { val strings = Seq("----", "BR ", "JAL ", "JALR") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(cfi_type) } } object BpdTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param bpd_type specific bpd type * @return Vec of Strs (must be indexed to get specific char) */ def apply(bpd_type: UInt) = { val strings = Seq("BR ", "JUMP", "----", "RET ", "----", "CALL", "----", "----") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(bpd_type) } } object RobTypeToChars { /** * Get a Vec of Strs that can be used for printing * * @param rob_type specific rob type * @return Vec of Strs (must be indexed to get specific char) */ def apply(rob_type: UInt) = { val strings = Seq("RST", "NML", "RBK", " WT") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(rob_type) } } object XRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param xreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(xreg: UInt) = { val strings = Seq(" x0", " ra", " sp", " gp", " tp", " t0", " t1", " t2", " s0", " s1", " a0", " a1", " a2", " a3", " a4", " a5", " a6", " a7", " s2", " s3", " s4", " s5", " s6", " s7", " s8", " s9", "s10", "s11", " t3", " t4", " t5", " t6") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(xreg) } } object FPRegToChars { /** * Get a Vec of Strs that can be used for printing * * @param fpreg specific register number * @return Vec of Strs (must be indexed to get specific char) */ def apply(fpreg: UInt) = { val strings = Seq(" ft0", " ft1", " ft2", " ft3", " ft4", " ft5", " ft6", " ft7", " fs0", " fs1", " fa0", " fa1", " fa2", " fa3", " fa4", " fa5", " fa6", " fa7", " fs2", " fs3", " fs4", " fs5", " fs6", " fs7", " fs8", " fs9", "fs10", "fs11", " ft8", " ft9", "ft10", "ft11") val multiVec = VecInit(for(string <- strings) yield { VecInit(for (c <- string) yield { Str(c) }) }) multiVec(fpreg) } } object BoomCoreStringPrefix { /** * Add prefix to BOOM strings (currently only adds the hartId) 
* * @param strs list of strings * @return String combining the list with the prefix per line */ def apply(strs: String*)(implicit p: Parameters) = { val prefix = "[C" + s"${p(TileKey).tileId}" + "] " strs.map(str => prefix + str + "\n").mkString("") } } File consts.scala: //****************************************************************************** // Copyright (c) 2011 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Constants //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ package boom.v3.common.constants import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util.Str import freechips.rocketchip.rocket.RVCExpander /** * Mixin for issue queue types */ trait IQType { val IQT_SZ = 3 val IQT_INT = 1.U(IQT_SZ.W) val IQT_MEM = 2.U(IQT_SZ.W) val IQT_FP = 4.U(IQT_SZ.W) val IQT_MFP = 6.U(IQT_SZ.W) } /** * Mixin for scalar operation constants */ trait ScalarOpConstants { val X = BitPat("b?") val Y = BitPat("b1") val N = BitPat("b0") //************************************ // Extra Constants // Which branch predictor predicted us val BSRC_SZ = 2 val BSRC_1 = 0.U(BSRC_SZ.W) // 1-cycle branch pred val BSRC_2 = 1.U(BSRC_SZ.W) // 2-cycle branch pred val BSRC_3 = 2.U(BSRC_SZ.W) // 3-cycle branch pred val BSRC_C = 3.U(BSRC_SZ.W) // core branch resolution //************************************ // Control Signals // CFI types val CFI_SZ = 3 val CFI_X = 0.U(CFI_SZ.W) // Not a CFI instruction val CFI_BR = 1.U(CFI_SZ.W) // Branch val CFI_JAL = 2.U(CFI_SZ.W) // JAL val CFI_JALR = 3.U(CFI_SZ.W) // JALR // PC Select Signal val PC_PLUS4 = 0.U(2.W) // PC + 4 val PC_BRJMP = 1.U(2.W) // brjmp_target val PC_JALR = 2.U(2.W) // jump_reg_target // Branch Type val BR_N = 0.U(4.W) // Next val BR_NE = 1.U(4.W) // Branch on NotEqual val BR_EQ = 2.U(4.W) // Branch on Equal val BR_GE = 3.U(4.W) // Branch on Greater/Equal val BR_GEU = 4.U(4.W) // Branch on Greater/Equal Unsigned val BR_LT = 5.U(4.W) // Branch on Less Than val BR_LTU = 6.U(4.W) // Branch on Less Than Unsigned val BR_J = 7.U(4.W) // Jump val BR_JR = 8.U(4.W) // Jump Register // RS1 Operand Select Signal val OP1_RS1 = 0.U(2.W) // Register Source #1 val OP1_ZERO= 1.U(2.W) val OP1_PC = 2.U(2.W) val OP1_X = BitPat("b??") // RS2 Operand Select Signal val OP2_RS2 = 0.U(3.W) // Register Source #2 val OP2_IMM = 1.U(3.W) // immediate val OP2_ZERO= 2.U(3.W) // constant 0 val OP2_NEXT= 3.U(3.W) // constant 2/4 (for PC+2/4) val OP2_IMMC= 4.U(3.W) // for CSR imm found in RS1 val OP2_X = BitPat("b???") // Register File Write Enable Signal val REN_0 = false.B val REN_1 = true.B // Is 32b Word or 64b Doubldword? 
val SZ_DW = 1 val DW_X = true.B // Bool(xLen==64) val DW_32 = false.B val DW_64 = true.B val DW_XPR = true.B // Bool(xLen==64) // Memory Enable Signal val MEN_0 = false.B val MEN_1 = true.B val MEN_X = false.B // Immediate Extend Select val IS_I = 0.U(3.W) // I-Type (LD,ALU) val IS_S = 1.U(3.W) // S-Type (ST) val IS_B = 2.U(3.W) // SB-Type (BR) val IS_U = 3.U(3.W) // U-Type (LUI/AUIPC) val IS_J = 4.U(3.W) // UJ-Type (J/JAL) val IS_X = BitPat("b???") // Decode Stage Control Signals val RT_FIX = 0.U(2.W) val RT_FLT = 1.U(2.W) val RT_PAS = 3.U(2.W) // pass-through (prs1 := lrs1, etc) val RT_X = 2.U(2.W) // not-a-register (but shouldn't get a busy-bit, etc.) // TODO rename RT_NAR // Micro-op opcodes // TODO change micro-op opcodes into using enum val UOPC_SZ = 7 val uopX = BitPat.dontCare(UOPC_SZ) val uopNOP = 0.U(UOPC_SZ.W) val uopLD = 1.U(UOPC_SZ.W) val uopSTA = 2.U(UOPC_SZ.W) // store address generation val uopSTD = 3.U(UOPC_SZ.W) // store data generation val uopLUI = 4.U(UOPC_SZ.W) val uopADDI = 5.U(UOPC_SZ.W) val uopANDI = 6.U(UOPC_SZ.W) val uopORI = 7.U(UOPC_SZ.W) val uopXORI = 8.U(UOPC_SZ.W) val uopSLTI = 9.U(UOPC_SZ.W) val uopSLTIU= 10.U(UOPC_SZ.W) val uopSLLI = 11.U(UOPC_SZ.W) val uopSRAI = 12.U(UOPC_SZ.W) val uopSRLI = 13.U(UOPC_SZ.W) val uopSLL = 14.U(UOPC_SZ.W) val uopADD = 15.U(UOPC_SZ.W) val uopSUB = 16.U(UOPC_SZ.W) val uopSLT = 17.U(UOPC_SZ.W) val uopSLTU = 18.U(UOPC_SZ.W) val uopAND = 19.U(UOPC_SZ.W) val uopOR = 20.U(UOPC_SZ.W) val uopXOR = 21.U(UOPC_SZ.W) val uopSRA = 22.U(UOPC_SZ.W) val uopSRL = 23.U(UOPC_SZ.W) val uopBEQ = 24.U(UOPC_SZ.W) val uopBNE = 25.U(UOPC_SZ.W) val uopBGE = 26.U(UOPC_SZ.W) val uopBGEU = 27.U(UOPC_SZ.W) val uopBLT = 28.U(UOPC_SZ.W) val uopBLTU = 29.U(UOPC_SZ.W) val uopCSRRW= 30.U(UOPC_SZ.W) val uopCSRRS= 31.U(UOPC_SZ.W) val uopCSRRC= 32.U(UOPC_SZ.W) val uopCSRRWI=33.U(UOPC_SZ.W) val uopCSRRSI=34.U(UOPC_SZ.W) val uopCSRRCI=35.U(UOPC_SZ.W) val uopJ = 36.U(UOPC_SZ.W) val uopJAL = 37.U(UOPC_SZ.W) val uopJALR = 38.U(UOPC_SZ.W) val uopAUIPC= 39.U(UOPC_SZ.W) //val uopSRET = 40.U(UOPC_SZ.W) val uopCFLSH= 41.U(UOPC_SZ.W) val uopFENCE= 42.U(UOPC_SZ.W) val uopADDIW= 43.U(UOPC_SZ.W) val uopADDW = 44.U(UOPC_SZ.W) val uopSUBW = 45.U(UOPC_SZ.W) val uopSLLIW= 46.U(UOPC_SZ.W) val uopSLLW = 47.U(UOPC_SZ.W) val uopSRAIW= 48.U(UOPC_SZ.W) val uopSRAW = 49.U(UOPC_SZ.W) val uopSRLIW= 50.U(UOPC_SZ.W) val uopSRLW = 51.U(UOPC_SZ.W) val uopMUL = 52.U(UOPC_SZ.W) val uopMULH = 53.U(UOPC_SZ.W) val uopMULHU= 54.U(UOPC_SZ.W) val uopMULHSU=55.U(UOPC_SZ.W) val uopMULW = 56.U(UOPC_SZ.W) val uopDIV = 57.U(UOPC_SZ.W) val uopDIVU = 58.U(UOPC_SZ.W) val uopREM = 59.U(UOPC_SZ.W) val uopREMU = 60.U(UOPC_SZ.W) val uopDIVW = 61.U(UOPC_SZ.W) val uopDIVUW= 62.U(UOPC_SZ.W) val uopREMW = 63.U(UOPC_SZ.W) val uopREMUW= 64.U(UOPC_SZ.W) val uopFENCEI = 65.U(UOPC_SZ.W) // = 66.U(UOPC_SZ.W) val uopAMO_AG = 67.U(UOPC_SZ.W) // AMO-address gen (use normal STD for datagen) val uopFMV_W_X = 68.U(UOPC_SZ.W) val uopFMV_D_X = 69.U(UOPC_SZ.W) val uopFMV_X_W = 70.U(UOPC_SZ.W) val uopFMV_X_D = 71.U(UOPC_SZ.W) val uopFSGNJ_S = 72.U(UOPC_SZ.W) val uopFSGNJ_D = 73.U(UOPC_SZ.W) val uopFCVT_S_D = 74.U(UOPC_SZ.W) val uopFCVT_D_S = 75.U(UOPC_SZ.W) val uopFCVT_S_X = 76.U(UOPC_SZ.W) val uopFCVT_D_X = 77.U(UOPC_SZ.W) val uopFCVT_X_S = 78.U(UOPC_SZ.W) val uopFCVT_X_D = 79.U(UOPC_SZ.W) val uopCMPR_S = 80.U(UOPC_SZ.W) val uopCMPR_D = 81.U(UOPC_SZ.W) val uopFCLASS_S = 82.U(UOPC_SZ.W) val uopFCLASS_D = 83.U(UOPC_SZ.W) val uopFMINMAX_S = 84.U(UOPC_SZ.W) val uopFMINMAX_D = 85.U(UOPC_SZ.W) // = 86.U(UOPC_SZ.W) val uopFADD_S = 
87.U(UOPC_SZ.W) val uopFSUB_S = 88.U(UOPC_SZ.W) val uopFMUL_S = 89.U(UOPC_SZ.W) val uopFADD_D = 90.U(UOPC_SZ.W) val uopFSUB_D = 91.U(UOPC_SZ.W) val uopFMUL_D = 92.U(UOPC_SZ.W) val uopFMADD_S = 93.U(UOPC_SZ.W) val uopFMSUB_S = 94.U(UOPC_SZ.W) val uopFNMADD_S = 95.U(UOPC_SZ.W) val uopFNMSUB_S = 96.U(UOPC_SZ.W) val uopFMADD_D = 97.U(UOPC_SZ.W) val uopFMSUB_D = 98.U(UOPC_SZ.W) val uopFNMADD_D = 99.U(UOPC_SZ.W) val uopFNMSUB_D = 100.U(UOPC_SZ.W) val uopFDIV_S = 101.U(UOPC_SZ.W) val uopFDIV_D = 102.U(UOPC_SZ.W) val uopFSQRT_S = 103.U(UOPC_SZ.W) val uopFSQRT_D = 104.U(UOPC_SZ.W) val uopWFI = 105.U(UOPC_SZ.W) // pass uop down the CSR pipeline val uopERET = 106.U(UOPC_SZ.W) // pass uop down the CSR pipeline, also is ERET val uopSFENCE = 107.U(UOPC_SZ.W) val uopROCC = 108.U(UOPC_SZ.W) val uopMOV = 109.U(UOPC_SZ.W) // conditional mov decoded from "add rd, x0, rs2" // The Bubble Instruction (Machine generated NOP) // Insert (XOR x0,x0,x0) which is different from software compiler // generated NOPs which are (ADDI x0, x0, 0). // Reasoning for this is to let visualizers and stat-trackers differentiate // between software NOPs and machine-generated Bubbles in the pipeline. val BUBBLE = (0x4033).U(32.W) def NullMicroOp()(implicit p: Parameters): boom.v3.common.MicroOp = { val uop = Wire(new boom.v3.common.MicroOp) uop := DontCare // Overridden in the following lines uop.uopc := uopNOP // maybe not required, but helps on asserts that try to catch spurious behavior uop.bypassable := false.B uop.fp_val := false.B uop.uses_stq := false.B uop.uses_ldq := false.B uop.pdst := 0.U uop.dst_rtype := RT_X val cs = Wire(new boom.v3.common.CtrlSignals()) cs := DontCare // Overridden in the following lines cs.br_type := BR_N cs.csr_cmd := freechips.rocketchip.rocket.CSR.N cs.is_load := false.B cs.is_sta := false.B cs.is_std := false.B uop.ctrl := cs uop } } /** * Mixin for RISCV constants */ trait RISCVConstants { // abstract out instruction decode magic numbers val RD_MSB = 11 val RD_LSB = 7 val RS1_MSB = 19 val RS1_LSB = 15 val RS2_MSB = 24 val RS2_LSB = 20 val RS3_MSB = 31 val RS3_LSB = 27 val CSR_ADDR_MSB = 31 val CSR_ADDR_LSB = 20 val CSR_ADDR_SZ = 12 // location of the fifth bit in the shamt (for checking for illegal ops for SRAIW,etc.) val SHAMT_5_BIT = 25 val LONGEST_IMM_SZ = 20 val X0 = 0.U val RA = 1.U // return address register // memory consistency model // The C/C++ atomics MCM requires that two loads to the same address maintain program order. // The Cortex A9 does NOT enforce load/load ordering (which leads to buggy behavior). 
val MCM_ORDER_DEPENDENT_LOADS = true val jal_opc = (0x6f).U val jalr_opc = (0x67).U def GetUop(inst: UInt): UInt = inst(6,0) def GetRd (inst: UInt): UInt = inst(RD_MSB,RD_LSB) def GetRs1(inst: UInt): UInt = inst(RS1_MSB,RS1_LSB) def ExpandRVC(inst: UInt)(implicit p: Parameters): UInt = { val rvc_exp = Module(new RVCExpander) rvc_exp.io.in := inst Mux(rvc_exp.io.rvc, rvc_exp.io.out.bits, inst) } // Note: Accepts only EXPANDED rvc instructions def ComputeBranchTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val b_imm32 = Cat(Fill(20,inst(31)), inst(7), inst(30,25), inst(11,8), 0.U(1.W)) ((pc.asSInt + b_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def ComputeJALTarget(pc: UInt, inst: UInt, xlen: Int)(implicit p: Parameters): UInt = { val j_imm32 = Cat(Fill(12,inst(31)), inst(19,12), inst(20), inst(30,25), inst(24,21), 0.U(1.W)) ((pc.asSInt + j_imm32.asSInt).asSInt & (-2).S).asUInt } // Note: Accepts only EXPANDED rvc instructions def GetCfiType(inst: UInt)(implicit p: Parameters): UInt = { val bdecode = Module(new boom.v3.exu.BranchDecode) bdecode.io.inst := inst bdecode.io.pc := 0.U bdecode.io.out.cfi_type } } /** * Mixin for exception cause constants */ trait ExcCauseConstants { // a memory disambigious misspeculation occurred val MINI_EXCEPTION_MEM_ORDERING = 16.U val MINI_EXCEPTION_CSR_REPLAY = 17.U require (!freechips.rocketchip.rocket.Causes.all.contains(16)) require (!freechips.rocketchip.rocket.Causes.all.contains(17)) } File issue-slot.scala: //****************************************************************************** // Copyright (c) 2015 - 2018, The Regents of the University of California (Regents). // All Rights Reserved. See LICENSE and LICENSE.SiFive for license details. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // RISCV Processor Issue Slot Logic //-------------------------------------------------------------------------- //------------------------------------------------------------------------------ // // Note: stores (and AMOs) are "broken down" into 2 uops, but stored within a single issue-slot. // TODO XXX make a separate issueSlot for MemoryIssueSlots, and only they break apart stores. // TODO Disable ldspec for FP queue. package boom.v3.exu import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config.Parameters import boom.v3.common._ import boom.v3.util._ import FUConstants._ /** * IO bundle to interact with Issue slot * * @param numWakeupPorts number of wakeup ports for the slot */ class IssueSlotIO(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomBundle { val valid = Output(Bool()) val will_be_valid = Output(Bool()) // TODO code review, do we need this signal so explicitely? val request = Output(Bool()) val request_hp = Output(Bool()) val grant = Input(Bool()) val brupdate = Input(new BrUpdateInfo()) val kill = Input(Bool()) // pipeline flush val clear = Input(Bool()) // entry being moved elsewhere (not mutually exclusive with grant) val ldspec_miss = Input(Bool()) // Previous cycle's speculative load wakeup was mispredicted. 
val wakeup_ports = Flipped(Vec(numWakeupPorts, Valid(new IqWakeup(maxPregSz)))) val pred_wakeup_port = Flipped(Valid(UInt(log2Ceil(ftqSz).W))) val spec_ld_wakeup = Flipped(Vec(memWidth, Valid(UInt(width=maxPregSz.W)))) val in_uop = Flipped(Valid(new MicroOp())) // if valid, this WILL overwrite an entry! val out_uop = Output(new MicroOp()) // the updated slot uop; will be shifted upwards in a collasping queue. val uop = Output(new MicroOp()) // the current Slot's uop. Sent down the pipeline when issued. val debug = { val result = new Bundle { val p1 = Bool() val p2 = Bool() val p3 = Bool() val ppred = Bool() val state = UInt(width=2.W) } Output(result) } } /** * Single issue slot. Holds a uop within the issue queue * * @param numWakeupPorts number of wakeup ports */ class IssueSlot(val numWakeupPorts: Int)(implicit p: Parameters) extends BoomModule with IssueUnitConstants { val io = IO(new IssueSlotIO(numWakeupPorts)) // slot invalid? // slot is valid, holding 1 uop // slot is valid, holds 2 uops (like a store) def is_invalid = state === s_invalid def is_valid = state =/= s_invalid val next_state = Wire(UInt()) // the next state of this slot (which might then get moved to a new slot) val next_uopc = Wire(UInt()) // the next uopc of this slot (which might then get moved to a new slot) val next_lrs1_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot) val next_lrs2_rtype = Wire(UInt()) // the next reg type of this slot (which might then get moved to a new slot) val state = RegInit(s_invalid) val p1 = RegInit(false.B) val p2 = RegInit(false.B) val p3 = RegInit(false.B) val ppred = RegInit(false.B) // Poison if woken up by speculative load. // Poison lasts 1 cycle (as ldMiss will come on the next cycle). // SO if poisoned is true, set it to false! val p1_poisoned = RegInit(false.B) val p2_poisoned = RegInit(false.B) p1_poisoned := false.B p2_poisoned := false.B val next_p1_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p1_poisoned, p1_poisoned) val next_p2_poisoned = Mux(io.in_uop.valid, io.in_uop.bits.iw_p2_poisoned, p2_poisoned) val slot_uop = RegInit(NullMicroOp) val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop) //----------------------------------------------------------------------------- // next slot state computation // compute the next state for THIS entry slot (in a collasping queue, the // current uop may get moved elsewhere, and a new uop can enter when (io.kill) { state := s_invalid } .elsewhen (io.in_uop.valid) { state := io.in_uop.bits.iw_state } .elsewhen (io.clear) { state := s_invalid } .otherwise { state := next_state } //----------------------------------------------------------------------------- // "update" state // compute the next state for the micro-op in this slot. This micro-op may // be moved elsewhere, so the "next_state" travels with it. // defaults next_state := state next_uopc := slot_uop.uopc next_lrs1_rtype := slot_uop.lrs1_rtype next_lrs2_rtype := slot_uop.lrs2_rtype when (io.kill) { next_state := s_invalid } .elsewhen ((io.grant && (state === s_valid_1)) || (io.grant && (state === s_valid_2) && p1 && p2 && ppred)) { // try to issue this uop. 
when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) { next_state := s_invalid } } .elsewhen (io.grant && (state === s_valid_2)) { when (!(io.ldspec_miss && (p1_poisoned || p2_poisoned))) { next_state := s_valid_1 when (p1) { slot_uop.uopc := uopSTD next_uopc := uopSTD slot_uop.lrs1_rtype := RT_X next_lrs1_rtype := RT_X } .otherwise { slot_uop.lrs2_rtype := RT_X next_lrs2_rtype := RT_X } } } when (io.in_uop.valid) { slot_uop := io.in_uop.bits assert (is_invalid || io.clear || io.kill, "trying to overwrite a valid issue slot.") } // Wakeup Compare Logic // these signals are the "next_p*" for the current slot's micro-op. // they are important for shifting the current slot_uop up to an other entry. val next_p1 = WireInit(p1) val next_p2 = WireInit(p2) val next_p3 = WireInit(p3) val next_ppred = WireInit(ppred) when (io.in_uop.valid) { p1 := !(io.in_uop.bits.prs1_busy) p2 := !(io.in_uop.bits.prs2_busy) p3 := !(io.in_uop.bits.prs3_busy) ppred := !(io.in_uop.bits.ppred_busy) } when (io.ldspec_miss && next_p1_poisoned) { assert(next_uop.prs1 =/= 0.U, "Poison bit can't be set for prs1=x0!") p1 := false.B } when (io.ldspec_miss && next_p2_poisoned) { assert(next_uop.prs2 =/= 0.U, "Poison bit can't be set for prs2=x0!") p2 := false.B } for (i <- 0 until numWakeupPorts) { when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs1)) { p1 := true.B } when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs2)) { p2 := true.B } when (io.wakeup_ports(i).valid && (io.wakeup_ports(i).bits.pdst === next_uop.prs3)) { p3 := true.B } } when (io.pred_wakeup_port.valid && io.pred_wakeup_port.bits === next_uop.ppred) { ppred := true.B } for (w <- 0 until memWidth) { assert (!(io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === 0.U), "Loads to x0 should never speculatively wakeup other instructions") } // TODO disable if FP IQ. for (w <- 0 until memWidth) { when (io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === next_uop.prs1 && next_uop.lrs1_rtype === RT_FIX) { p1 := true.B p1_poisoned := true.B assert (!next_p1_poisoned) } when (io.spec_ld_wakeup(w).valid && io.spec_ld_wakeup(w).bits === next_uop.prs2 && next_uop.lrs2_rtype === RT_FIX) { p2 := true.B p2_poisoned := true.B assert (!next_p2_poisoned) } } // Handle branch misspeculations val next_br_mask = GetNewBrMask(io.brupdate, slot_uop) // was this micro-op killed by a branch? if yes, we can't let it be valid if // we compact it into an other entry when (IsKilledByBranch(io.brupdate, slot_uop)) { next_state := s_invalid } when (!io.in_uop.valid) { slot_uop.br_mask := next_br_mask } //------------------------------------------------------------- // Request Logic io.request := is_valid && p1 && p2 && p3 && ppred && !io.kill val high_priority = slot_uop.is_br || slot_uop.is_jal || slot_uop.is_jalr io.request_hp := io.request && high_priority when (state === s_valid_1) { io.request := p1 && p2 && p3 && ppred && !io.kill } .elsewhen (state === s_valid_2) { io.request := (p1 || p2) && ppred && !io.kill } .otherwise { io.request := false.B } //assign outputs io.valid := is_valid io.uop := slot_uop io.uop.iw_p1_poisoned := p1_poisoned io.uop.iw_p2_poisoned := p2_poisoned // micro-op will vacate due to grant. 
  val may_vacate = io.grant && ((state === s_valid_1) || (state === s_valid_2) && p1 && p2 && ppred)
  val squash_grant = io.ldspec_miss && (p1_poisoned || p2_poisoned)
  io.will_be_valid := is_valid && !(may_vacate && !squash_grant)

  io.out_uop            := slot_uop
  io.out_uop.iw_state   := next_state
  io.out_uop.uopc       := next_uopc
  io.out_uop.lrs1_rtype := next_lrs1_rtype
  io.out_uop.lrs2_rtype := next_lrs2_rtype
  io.out_uop.br_mask    := next_br_mask
  io.out_uop.prs1_busy  := !p1
  io.out_uop.prs2_busy  := !p2
  io.out_uop.prs3_busy  := !p3
  io.out_uop.ppred_busy := !ppred
  io.out_uop.iw_p1_poisoned := p1_poisoned
  io.out_uop.iw_p2_poisoned := p2_poisoned

  when (state === s_valid_2) {
    when (p1 && p2 && ppred) {
      ; // send out the entire instruction as one uop
    } .elsewhen (p1 && ppred) {
      io.uop.uopc := slot_uop.uopc
      io.uop.lrs2_rtype := RT_X
    } .elsewhen (p2 && ppred) {
      io.uop.uopc := uopSTD
      io.uop.lrs1_rtype := RT_X
    }
  }

  // debug outputs
  io.debug.p1 := p1
  io.debug.p2 := p2
  io.debug.p3 := p3
  io.debug.ppred := ppred
  io.debug.state := state
}
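For reference, the following is a minimal, illustrative sketch (not part of the original sources) of how a collapsing queue could chain these slots: io.out_uop of slot i+1 (which already carries the updated next_* fields) feeds io.in_uop of slot i whenever slot i is about to vacate. The slot port names come from IssueSlotIO above; the module name CollapsingSlotChain, the dis_uop/iss_uops ports, and the tie-offs are hypothetical, and the sketch assumes the surrounding BOOM v3 definitions (MicroOp, IssueSlot, BoomModule) are on the classpath.

package boom.v3.exu

import chisel3._
import chisel3.util._
import org.chipsalliance.cde.config.Parameters
import boom.v3.common._

class CollapsingSlotChain(numSlots: Int, numWakeupPorts: Int)(implicit p: Parameters)
  extends BoomModule
{
  val io = IO(new Bundle {
    val dis_uop  = Flipped(Valid(new MicroOp())) // new uops enter at the tail slot
    val iss_uops = Output(Vec(numSlots, new MicroOp()))
  })

  val slots = Seq.fill(numSlots)(Module(new IssueSlot(numWakeupPorts)))

  // Tie off the ports this sketch does not exercise (no grants, kills, or wakeups).
  for (s <- slots) {
    s.io.grant             := false.B
    s.io.kill              := false.B
    s.io.clear             := false.B
    s.io.ldspec_miss       := false.B
    s.io.brupdate          := DontCare
    s.io.wakeup_ports      := DontCare
    s.io.pred_wakeup_port  := DontCare
    s.io.spec_ld_wakeup    := DontCare
  }

  // Collapse: a slot that will not be valid next cycle pulls in the uop from the
  // slot above it.
  for (i <- 0 until numSlots - 1) {
    slots(i).io.in_uop.valid := slots(i + 1).io.will_be_valid && !slots(i).io.will_be_valid
    slots(i).io.in_uop.bits  := slots(i + 1).io.out_uop
  }

  // Dispatch into the tail slot.
  slots(numSlots - 1).io.in_uop.valid := io.dis_uop.valid
  slots(numSlots - 1).io.in_uop.bits  := io.dis_uop.bits

  io.iss_uops := VecInit(slots.map(_.io.uop))
}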
module IssueSlot_93( // @[issue-slot.scala:69:7] input clock, // @[issue-slot.scala:69:7] input reset, // @[issue-slot.scala:69:7] output io_valid, // @[issue-slot.scala:73:14] output io_will_be_valid, // @[issue-slot.scala:73:14] output io_request, // @[issue-slot.scala:73:14] output io_request_hp, // @[issue-slot.scala:73:14] input io_grant, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b1_resolve_mask, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b1_mispredict_mask, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_uopc, // @[issue-slot.scala:73:14] input [31:0] io_brupdate_b2_uop_inst, // @[issue-slot.scala:73:14] input [31:0] io_brupdate_b2_uop_debug_inst, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_rvc, // @[issue-slot.scala:73:14] input [39:0] io_brupdate_b2_uop_debug_pc, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_iq_type, // @[issue-slot.scala:73:14] input [9:0] io_brupdate_b2_uop_fu_code, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_ctrl_br_type, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_load, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ctrl_is_std, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_iw_state, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_iw_p1_poisoned, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_iw_p2_poisoned, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_br, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_jalr, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_jal, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_sfb, // @[issue-slot.scala:73:14] input [15:0] io_brupdate_b2_uop_br_mask, // @[issue-slot.scala:73:14] input [3:0] io_brupdate_b2_uop_br_tag, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ftq_idx, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_edge_inst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_pc_lob, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_taken, // @[issue-slot.scala:73:14] input [19:0] io_brupdate_b2_uop_imm_packed, // @[issue-slot.scala:73:14] input [11:0] io_brupdate_b2_uop_csr_addr, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_rob_idx, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ldq_idx, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_stq_idx, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_rxq_idx, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_pdst, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs1, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs2, // @[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_prs3, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_ppred, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs1_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs2_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_prs3_busy, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ppred_busy, // 
@[issue-slot.scala:73:14] input [6:0] io_brupdate_b2_uop_stale_pdst, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_exception, // @[issue-slot.scala:73:14] input [63:0] io_brupdate_b2_uop_exc_cause, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bypassable, // @[issue-slot.scala:73:14] input [4:0] io_brupdate_b2_uop_mem_cmd, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_mem_size, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_mem_signed, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_fence, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_fencei, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_amo, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_uses_ldq, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_uses_stq, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_is_unique, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_flush_on_commit, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_ldst, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs1, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs2, // @[issue-slot.scala:73:14] input [5:0] io_brupdate_b2_uop_lrs3, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_ldst_val, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_dst_rtype, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_lrs1_rtype, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_lrs2_rtype, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_frs3_en, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_fp_val, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_fp_single, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bp_debug_if, // @[issue-slot.scala:73:14] input io_brupdate_b2_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_debug_fsrc, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_uop_debug_tsrc, // @[issue-slot.scala:73:14] input io_brupdate_b2_valid, // @[issue-slot.scala:73:14] input io_brupdate_b2_mispredict, // @[issue-slot.scala:73:14] input io_brupdate_b2_taken, // @[issue-slot.scala:73:14] input [2:0] io_brupdate_b2_cfi_type, // @[issue-slot.scala:73:14] input [1:0] io_brupdate_b2_pc_sel, // @[issue-slot.scala:73:14] input [39:0] io_brupdate_b2_jalr_target, // @[issue-slot.scala:73:14] input [20:0] io_brupdate_b2_target_offset, // @[issue-slot.scala:73:14] input io_kill, // @[issue-slot.scala:73:14] input io_clear, // @[issue-slot.scala:73:14] input io_wakeup_ports_0_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_0_bits_pdst, // @[issue-slot.scala:73:14] input io_wakeup_ports_1_valid, // @[issue-slot.scala:73:14] input [6:0] io_wakeup_ports_1_bits_pdst, // @[issue-slot.scala:73:14] input io_in_uop_valid, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_uopc, // @[issue-slot.scala:73:14] input [31:0] io_in_uop_bits_inst, // @[issue-slot.scala:73:14] input [31:0] io_in_uop_bits_debug_inst, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_rvc, // @[issue-slot.scala:73:14] input [39:0] io_in_uop_bits_debug_pc, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_iq_type, // @[issue-slot.scala:73:14] input [9:0] 
io_in_uop_bits_fu_code, // @[issue-slot.scala:73:14] input [3:0] io_in_uop_bits_ctrl_br_type, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_ctrl_op1_sel, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_op2_sel, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_imm_sel, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ctrl_op_fcn, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_fcn_dw, // @[issue-slot.scala:73:14] input [2:0] io_in_uop_bits_ctrl_csr_cmd, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_load, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_sta, // @[issue-slot.scala:73:14] input io_in_uop_bits_ctrl_is_std, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_iw_state, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_br, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_jalr, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_jal, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_sfb, // @[issue-slot.scala:73:14] input [15:0] io_in_uop_bits_br_mask, // @[issue-slot.scala:73:14] input [3:0] io_in_uop_bits_br_tag, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ftq_idx, // @[issue-slot.scala:73:14] input io_in_uop_bits_edge_inst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_pc_lob, // @[issue-slot.scala:73:14] input io_in_uop_bits_taken, // @[issue-slot.scala:73:14] input [19:0] io_in_uop_bits_imm_packed, // @[issue-slot.scala:73:14] input [11:0] io_in_uop_bits_csr_addr, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_rob_idx, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ldq_idx, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_stq_idx, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_rxq_idx, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_pdst, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs1, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs2, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_prs3, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_ppred, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs1_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs2_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_prs3_busy, // @[issue-slot.scala:73:14] input io_in_uop_bits_ppred_busy, // @[issue-slot.scala:73:14] input [6:0] io_in_uop_bits_stale_pdst, // @[issue-slot.scala:73:14] input io_in_uop_bits_exception, // @[issue-slot.scala:73:14] input [63:0] io_in_uop_bits_exc_cause, // @[issue-slot.scala:73:14] input io_in_uop_bits_bypassable, // @[issue-slot.scala:73:14] input [4:0] io_in_uop_bits_mem_cmd, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_mem_size, // @[issue-slot.scala:73:14] input io_in_uop_bits_mem_signed, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_fence, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_fencei, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_amo, // @[issue-slot.scala:73:14] input io_in_uop_bits_uses_ldq, // @[issue-slot.scala:73:14] input io_in_uop_bits_uses_stq, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_sys_pc2epc, // @[issue-slot.scala:73:14] input io_in_uop_bits_is_unique, // @[issue-slot.scala:73:14] input io_in_uop_bits_flush_on_commit, // @[issue-slot.scala:73:14] input io_in_uop_bits_ldst_is_rs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_ldst, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs1, // @[issue-slot.scala:73:14] input [5:0] io_in_uop_bits_lrs2, // @[issue-slot.scala:73:14] input 
[5:0] io_in_uop_bits_lrs3, // @[issue-slot.scala:73:14] input io_in_uop_bits_ldst_val, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_dst_rtype, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_lrs1_rtype, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_lrs2_rtype, // @[issue-slot.scala:73:14] input io_in_uop_bits_frs3_en, // @[issue-slot.scala:73:14] input io_in_uop_bits_fp_val, // @[issue-slot.scala:73:14] input io_in_uop_bits_fp_single, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_pf_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_ae_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_xcpt_ma_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_bp_debug_if, // @[issue-slot.scala:73:14] input io_in_uop_bits_bp_xcpt_if, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_debug_fsrc, // @[issue-slot.scala:73:14] input [1:0] io_in_uop_bits_debug_tsrc, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_uopc, // @[issue-slot.scala:73:14] output [31:0] io_out_uop_inst, // @[issue-slot.scala:73:14] output [31:0] io_out_uop_debug_inst, // @[issue-slot.scala:73:14] output io_out_uop_is_rvc, // @[issue-slot.scala:73:14] output [39:0] io_out_uop_debug_pc, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_iq_type, // @[issue-slot.scala:73:14] output [9:0] io_out_uop_fu_code, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_ctrl_br_type, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] output [2:0] io_out_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_load, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] output io_out_uop_ctrl_is_std, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_iw_state, // @[issue-slot.scala:73:14] output io_out_uop_is_br, // @[issue-slot.scala:73:14] output io_out_uop_is_jalr, // @[issue-slot.scala:73:14] output io_out_uop_is_jal, // @[issue-slot.scala:73:14] output io_out_uop_is_sfb, // @[issue-slot.scala:73:14] output [15:0] io_out_uop_br_mask, // @[issue-slot.scala:73:14] output [3:0] io_out_uop_br_tag, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ftq_idx, // @[issue-slot.scala:73:14] output io_out_uop_edge_inst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_pc_lob, // @[issue-slot.scala:73:14] output io_out_uop_taken, // @[issue-slot.scala:73:14] output [19:0] io_out_uop_imm_packed, // @[issue-slot.scala:73:14] output [11:0] io_out_uop_csr_addr, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_rob_idx, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ldq_idx, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_stq_idx, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_rxq_idx, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_pdst, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs1, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs2, // @[issue-slot.scala:73:14] output [6:0] io_out_uop_prs3, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_ppred, // @[issue-slot.scala:73:14] output io_out_uop_prs1_busy, // @[issue-slot.scala:73:14] output io_out_uop_prs2_busy, // @[issue-slot.scala:73:14] output io_out_uop_prs3_busy, // @[issue-slot.scala:73:14] output io_out_uop_ppred_busy, // 
@[issue-slot.scala:73:14] output [6:0] io_out_uop_stale_pdst, // @[issue-slot.scala:73:14] output io_out_uop_exception, // @[issue-slot.scala:73:14] output [63:0] io_out_uop_exc_cause, // @[issue-slot.scala:73:14] output io_out_uop_bypassable, // @[issue-slot.scala:73:14] output [4:0] io_out_uop_mem_cmd, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_mem_size, // @[issue-slot.scala:73:14] output io_out_uop_mem_signed, // @[issue-slot.scala:73:14] output io_out_uop_is_fence, // @[issue-slot.scala:73:14] output io_out_uop_is_fencei, // @[issue-slot.scala:73:14] output io_out_uop_is_amo, // @[issue-slot.scala:73:14] output io_out_uop_uses_ldq, // @[issue-slot.scala:73:14] output io_out_uop_uses_stq, // @[issue-slot.scala:73:14] output io_out_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] output io_out_uop_is_unique, // @[issue-slot.scala:73:14] output io_out_uop_flush_on_commit, // @[issue-slot.scala:73:14] output io_out_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_ldst, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs1, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs2, // @[issue-slot.scala:73:14] output [5:0] io_out_uop_lrs3, // @[issue-slot.scala:73:14] output io_out_uop_ldst_val, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_dst_rtype, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_lrs1_rtype, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_lrs2_rtype, // @[issue-slot.scala:73:14] output io_out_uop_frs3_en, // @[issue-slot.scala:73:14] output io_out_uop_fp_val, // @[issue-slot.scala:73:14] output io_out_uop_fp_single, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] output io_out_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] output io_out_uop_bp_debug_if, // @[issue-slot.scala:73:14] output io_out_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_debug_fsrc, // @[issue-slot.scala:73:14] output [1:0] io_out_uop_debug_tsrc, // @[issue-slot.scala:73:14] output [6:0] io_uop_uopc, // @[issue-slot.scala:73:14] output [31:0] io_uop_inst, // @[issue-slot.scala:73:14] output [31:0] io_uop_debug_inst, // @[issue-slot.scala:73:14] output io_uop_is_rvc, // @[issue-slot.scala:73:14] output [39:0] io_uop_debug_pc, // @[issue-slot.scala:73:14] output [2:0] io_uop_iq_type, // @[issue-slot.scala:73:14] output [9:0] io_uop_fu_code, // @[issue-slot.scala:73:14] output [3:0] io_uop_ctrl_br_type, // @[issue-slot.scala:73:14] output [1:0] io_uop_ctrl_op1_sel, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_op2_sel, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_imm_sel, // @[issue-slot.scala:73:14] output [4:0] io_uop_ctrl_op_fcn, // @[issue-slot.scala:73:14] output io_uop_ctrl_fcn_dw, // @[issue-slot.scala:73:14] output [2:0] io_uop_ctrl_csr_cmd, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_load, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_sta, // @[issue-slot.scala:73:14] output io_uop_ctrl_is_std, // @[issue-slot.scala:73:14] output [1:0] io_uop_iw_state, // @[issue-slot.scala:73:14] output io_uop_is_br, // @[issue-slot.scala:73:14] output io_uop_is_jalr, // @[issue-slot.scala:73:14] output io_uop_is_jal, // @[issue-slot.scala:73:14] output io_uop_is_sfb, // @[issue-slot.scala:73:14] output [15:0] io_uop_br_mask, // @[issue-slot.scala:73:14] output [3:0] io_uop_br_tag, // @[issue-slot.scala:73:14] output [4:0] io_uop_ftq_idx, // @[issue-slot.scala:73:14] output io_uop_edge_inst, // 
@[issue-slot.scala:73:14] output [5:0] io_uop_pc_lob, // @[issue-slot.scala:73:14] output io_uop_taken, // @[issue-slot.scala:73:14] output [19:0] io_uop_imm_packed, // @[issue-slot.scala:73:14] output [11:0] io_uop_csr_addr, // @[issue-slot.scala:73:14] output [6:0] io_uop_rob_idx, // @[issue-slot.scala:73:14] output [4:0] io_uop_ldq_idx, // @[issue-slot.scala:73:14] output [4:0] io_uop_stq_idx, // @[issue-slot.scala:73:14] output [1:0] io_uop_rxq_idx, // @[issue-slot.scala:73:14] output [6:0] io_uop_pdst, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs1, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs2, // @[issue-slot.scala:73:14] output [6:0] io_uop_prs3, // @[issue-slot.scala:73:14] output [4:0] io_uop_ppred, // @[issue-slot.scala:73:14] output io_uop_prs1_busy, // @[issue-slot.scala:73:14] output io_uop_prs2_busy, // @[issue-slot.scala:73:14] output io_uop_prs3_busy, // @[issue-slot.scala:73:14] output io_uop_ppred_busy, // @[issue-slot.scala:73:14] output [6:0] io_uop_stale_pdst, // @[issue-slot.scala:73:14] output io_uop_exception, // @[issue-slot.scala:73:14] output [63:0] io_uop_exc_cause, // @[issue-slot.scala:73:14] output io_uop_bypassable, // @[issue-slot.scala:73:14] output [4:0] io_uop_mem_cmd, // @[issue-slot.scala:73:14] output [1:0] io_uop_mem_size, // @[issue-slot.scala:73:14] output io_uop_mem_signed, // @[issue-slot.scala:73:14] output io_uop_is_fence, // @[issue-slot.scala:73:14] output io_uop_is_fencei, // @[issue-slot.scala:73:14] output io_uop_is_amo, // @[issue-slot.scala:73:14] output io_uop_uses_ldq, // @[issue-slot.scala:73:14] output io_uop_uses_stq, // @[issue-slot.scala:73:14] output io_uop_is_sys_pc2epc, // @[issue-slot.scala:73:14] output io_uop_is_unique, // @[issue-slot.scala:73:14] output io_uop_flush_on_commit, // @[issue-slot.scala:73:14] output io_uop_ldst_is_rs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_ldst, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs1, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs2, // @[issue-slot.scala:73:14] output [5:0] io_uop_lrs3, // @[issue-slot.scala:73:14] output io_uop_ldst_val, // @[issue-slot.scala:73:14] output [1:0] io_uop_dst_rtype, // @[issue-slot.scala:73:14] output [1:0] io_uop_lrs1_rtype, // @[issue-slot.scala:73:14] output [1:0] io_uop_lrs2_rtype, // @[issue-slot.scala:73:14] output io_uop_frs3_en, // @[issue-slot.scala:73:14] output io_uop_fp_val, // @[issue-slot.scala:73:14] output io_uop_fp_single, // @[issue-slot.scala:73:14] output io_uop_xcpt_pf_if, // @[issue-slot.scala:73:14] output io_uop_xcpt_ae_if, // @[issue-slot.scala:73:14] output io_uop_xcpt_ma_if, // @[issue-slot.scala:73:14] output io_uop_bp_debug_if, // @[issue-slot.scala:73:14] output io_uop_bp_xcpt_if, // @[issue-slot.scala:73:14] output [1:0] io_uop_debug_fsrc, // @[issue-slot.scala:73:14] output [1:0] io_uop_debug_tsrc, // @[issue-slot.scala:73:14] output io_debug_p1, // @[issue-slot.scala:73:14] output io_debug_p2, // @[issue-slot.scala:73:14] output io_debug_p3, // @[issue-slot.scala:73:14] output io_debug_ppred, // @[issue-slot.scala:73:14] output [1:0] io_debug_state // @[issue-slot.scala:73:14] ); wire io_grant_0 = io_grant; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b1_resolve_mask_0 = io_brupdate_b1_resolve_mask; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b1_mispredict_mask_0 = io_brupdate_b1_mispredict_mask; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_uopc_0 = io_brupdate_b2_uop_uopc; // @[issue-slot.scala:69:7] wire [31:0] io_brupdate_b2_uop_inst_0 = 
io_brupdate_b2_uop_inst; // @[issue-slot.scala:69:7] wire [31:0] io_brupdate_b2_uop_debug_inst_0 = io_brupdate_b2_uop_debug_inst; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_rvc_0 = io_brupdate_b2_uop_is_rvc; // @[issue-slot.scala:69:7] wire [39:0] io_brupdate_b2_uop_debug_pc_0 = io_brupdate_b2_uop_debug_pc; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_iq_type_0 = io_brupdate_b2_uop_iq_type; // @[issue-slot.scala:69:7] wire [9:0] io_brupdate_b2_uop_fu_code_0 = io_brupdate_b2_uop_fu_code; // @[issue-slot.scala:69:7] wire [3:0] io_brupdate_b2_uop_ctrl_br_type_0 = io_brupdate_b2_uop_ctrl_br_type; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_ctrl_op1_sel_0 = io_brupdate_b2_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_op2_sel_0 = io_brupdate_b2_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_imm_sel_0 = io_brupdate_b2_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ctrl_op_fcn_0 = io_brupdate_b2_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_fcn_dw_0 = io_brupdate_b2_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_uop_ctrl_csr_cmd_0 = io_brupdate_b2_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_load_0 = io_brupdate_b2_uop_ctrl_is_load; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_sta_0 = io_brupdate_b2_uop_ctrl_is_sta; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ctrl_is_std_0 = io_brupdate_b2_uop_ctrl_is_std; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_iw_state_0 = io_brupdate_b2_uop_iw_state; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_iw_p1_poisoned_0 = io_brupdate_b2_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_iw_p2_poisoned_0 = io_brupdate_b2_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_br_0 = io_brupdate_b2_uop_is_br; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_jalr_0 = io_brupdate_b2_uop_is_jalr; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_jal_0 = io_brupdate_b2_uop_is_jal; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_sfb_0 = io_brupdate_b2_uop_is_sfb; // @[issue-slot.scala:69:7] wire [15:0] io_brupdate_b2_uop_br_mask_0 = io_brupdate_b2_uop_br_mask; // @[issue-slot.scala:69:7] wire [3:0] io_brupdate_b2_uop_br_tag_0 = io_brupdate_b2_uop_br_tag; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ftq_idx_0 = io_brupdate_b2_uop_ftq_idx; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_edge_inst_0 = io_brupdate_b2_uop_edge_inst; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_pc_lob_0 = io_brupdate_b2_uop_pc_lob; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_taken_0 = io_brupdate_b2_uop_taken; // @[issue-slot.scala:69:7] wire [19:0] io_brupdate_b2_uop_imm_packed_0 = io_brupdate_b2_uop_imm_packed; // @[issue-slot.scala:69:7] wire [11:0] io_brupdate_b2_uop_csr_addr_0 = io_brupdate_b2_uop_csr_addr; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_rob_idx_0 = io_brupdate_b2_uop_rob_idx; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ldq_idx_0 = io_brupdate_b2_uop_ldq_idx; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_stq_idx_0 = io_brupdate_b2_uop_stq_idx; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_rxq_idx_0 = io_brupdate_b2_uop_rxq_idx; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_pdst_0 = io_brupdate_b2_uop_pdst; // @[issue-slot.scala:69:7] wire [6:0] 
io_brupdate_b2_uop_prs1_0 = io_brupdate_b2_uop_prs1; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_prs2_0 = io_brupdate_b2_uop_prs2; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_prs3_0 = io_brupdate_b2_uop_prs3; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_ppred_0 = io_brupdate_b2_uop_ppred; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs1_busy_0 = io_brupdate_b2_uop_prs1_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs2_busy_0 = io_brupdate_b2_uop_prs2_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_prs3_busy_0 = io_brupdate_b2_uop_prs3_busy; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ppred_busy_0 = io_brupdate_b2_uop_ppred_busy; // @[issue-slot.scala:69:7] wire [6:0] io_brupdate_b2_uop_stale_pdst_0 = io_brupdate_b2_uop_stale_pdst; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_exception_0 = io_brupdate_b2_uop_exception; // @[issue-slot.scala:69:7] wire [63:0] io_brupdate_b2_uop_exc_cause_0 = io_brupdate_b2_uop_exc_cause; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bypassable_0 = io_brupdate_b2_uop_bypassable; // @[issue-slot.scala:69:7] wire [4:0] io_brupdate_b2_uop_mem_cmd_0 = io_brupdate_b2_uop_mem_cmd; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_mem_size_0 = io_brupdate_b2_uop_mem_size; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_mem_signed_0 = io_brupdate_b2_uop_mem_signed; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_fence_0 = io_brupdate_b2_uop_is_fence; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_fencei_0 = io_brupdate_b2_uop_is_fencei; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_amo_0 = io_brupdate_b2_uop_is_amo; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_uses_ldq_0 = io_brupdate_b2_uop_uses_ldq; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_uses_stq_0 = io_brupdate_b2_uop_uses_stq; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_sys_pc2epc_0 = io_brupdate_b2_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_is_unique_0 = io_brupdate_b2_uop_is_unique; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_flush_on_commit_0 = io_brupdate_b2_uop_flush_on_commit; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ldst_is_rs1_0 = io_brupdate_b2_uop_ldst_is_rs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_ldst_0 = io_brupdate_b2_uop_ldst; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs1_0 = io_brupdate_b2_uop_lrs1; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs2_0 = io_brupdate_b2_uop_lrs2; // @[issue-slot.scala:69:7] wire [5:0] io_brupdate_b2_uop_lrs3_0 = io_brupdate_b2_uop_lrs3; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_ldst_val_0 = io_brupdate_b2_uop_ldst_val; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_dst_rtype_0 = io_brupdate_b2_uop_dst_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_lrs1_rtype_0 = io_brupdate_b2_uop_lrs1_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_lrs2_rtype_0 = io_brupdate_b2_uop_lrs2_rtype; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_frs3_en_0 = io_brupdate_b2_uop_frs3_en; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_fp_val_0 = io_brupdate_b2_uop_fp_val; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_fp_single_0 = io_brupdate_b2_uop_fp_single; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_pf_if_0 = io_brupdate_b2_uop_xcpt_pf_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_ae_if_0 = io_brupdate_b2_uop_xcpt_ae_if; // 
@[issue-slot.scala:69:7] wire io_brupdate_b2_uop_xcpt_ma_if_0 = io_brupdate_b2_uop_xcpt_ma_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bp_debug_if_0 = io_brupdate_b2_uop_bp_debug_if; // @[issue-slot.scala:69:7] wire io_brupdate_b2_uop_bp_xcpt_if_0 = io_brupdate_b2_uop_bp_xcpt_if; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_debug_fsrc_0 = io_brupdate_b2_uop_debug_fsrc; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_uop_debug_tsrc_0 = io_brupdate_b2_uop_debug_tsrc; // @[issue-slot.scala:69:7] wire io_brupdate_b2_valid_0 = io_brupdate_b2_valid; // @[issue-slot.scala:69:7] wire io_brupdate_b2_mispredict_0 = io_brupdate_b2_mispredict; // @[issue-slot.scala:69:7] wire io_brupdate_b2_taken_0 = io_brupdate_b2_taken; // @[issue-slot.scala:69:7] wire [2:0] io_brupdate_b2_cfi_type_0 = io_brupdate_b2_cfi_type; // @[issue-slot.scala:69:7] wire [1:0] io_brupdate_b2_pc_sel_0 = io_brupdate_b2_pc_sel; // @[issue-slot.scala:69:7] wire [39:0] io_brupdate_b2_jalr_target_0 = io_brupdate_b2_jalr_target; // @[issue-slot.scala:69:7] wire [20:0] io_brupdate_b2_target_offset_0 = io_brupdate_b2_target_offset; // @[issue-slot.scala:69:7] wire io_kill_0 = io_kill; // @[issue-slot.scala:69:7] wire io_clear_0 = io_clear; // @[issue-slot.scala:69:7] wire io_wakeup_ports_0_valid_0 = io_wakeup_ports_0_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_0_bits_pdst_0 = io_wakeup_ports_0_bits_pdst; // @[issue-slot.scala:69:7] wire io_wakeup_ports_1_valid_0 = io_wakeup_ports_1_valid; // @[issue-slot.scala:69:7] wire [6:0] io_wakeup_ports_1_bits_pdst_0 = io_wakeup_ports_1_bits_pdst; // @[issue-slot.scala:69:7] wire io_in_uop_valid_0 = io_in_uop_valid; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_uopc_0 = io_in_uop_bits_uopc; // @[issue-slot.scala:69:7] wire [31:0] io_in_uop_bits_inst_0 = io_in_uop_bits_inst; // @[issue-slot.scala:69:7] wire [31:0] io_in_uop_bits_debug_inst_0 = io_in_uop_bits_debug_inst; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_rvc_0 = io_in_uop_bits_is_rvc; // @[issue-slot.scala:69:7] wire [39:0] io_in_uop_bits_debug_pc_0 = io_in_uop_bits_debug_pc; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_iq_type_0 = io_in_uop_bits_iq_type; // @[issue-slot.scala:69:7] wire [9:0] io_in_uop_bits_fu_code_0 = io_in_uop_bits_fu_code; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_ctrl_br_type_0 = io_in_uop_bits_ctrl_br_type; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_ctrl_op1_sel_0 = io_in_uop_bits_ctrl_op1_sel; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_op2_sel_0 = io_in_uop_bits_ctrl_op2_sel; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_imm_sel_0 = io_in_uop_bits_ctrl_imm_sel; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ctrl_op_fcn_0 = io_in_uop_bits_ctrl_op_fcn; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_fcn_dw_0 = io_in_uop_bits_ctrl_fcn_dw; // @[issue-slot.scala:69:7] wire [2:0] io_in_uop_bits_ctrl_csr_cmd_0 = io_in_uop_bits_ctrl_csr_cmd; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_load_0 = io_in_uop_bits_ctrl_is_load; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_sta_0 = io_in_uop_bits_ctrl_is_sta; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ctrl_is_std_0 = io_in_uop_bits_ctrl_is_std; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_iw_state_0 = io_in_uop_bits_iw_state; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_br_0 = io_in_uop_bits_is_br; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_jalr_0 = io_in_uop_bits_is_jalr; // 
@[issue-slot.scala:69:7] wire io_in_uop_bits_is_jal_0 = io_in_uop_bits_is_jal; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_sfb_0 = io_in_uop_bits_is_sfb; // @[issue-slot.scala:69:7] wire [15:0] io_in_uop_bits_br_mask_0 = io_in_uop_bits_br_mask; // @[issue-slot.scala:69:7] wire [3:0] io_in_uop_bits_br_tag_0 = io_in_uop_bits_br_tag; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ftq_idx_0 = io_in_uop_bits_ftq_idx; // @[issue-slot.scala:69:7] wire io_in_uop_bits_edge_inst_0 = io_in_uop_bits_edge_inst; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_pc_lob_0 = io_in_uop_bits_pc_lob; // @[issue-slot.scala:69:7] wire io_in_uop_bits_taken_0 = io_in_uop_bits_taken; // @[issue-slot.scala:69:7] wire [19:0] io_in_uop_bits_imm_packed_0 = io_in_uop_bits_imm_packed; // @[issue-slot.scala:69:7] wire [11:0] io_in_uop_bits_csr_addr_0 = io_in_uop_bits_csr_addr; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_rob_idx_0 = io_in_uop_bits_rob_idx; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ldq_idx_0 = io_in_uop_bits_ldq_idx; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_stq_idx_0 = io_in_uop_bits_stq_idx; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_rxq_idx_0 = io_in_uop_bits_rxq_idx; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_pdst_0 = io_in_uop_bits_pdst; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs1_0 = io_in_uop_bits_prs1; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs2_0 = io_in_uop_bits_prs2; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_prs3_0 = io_in_uop_bits_prs3; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_ppred_0 = io_in_uop_bits_ppred; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs1_busy_0 = io_in_uop_bits_prs1_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs2_busy_0 = io_in_uop_bits_prs2_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_prs3_busy_0 = io_in_uop_bits_prs3_busy; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ppred_busy_0 = io_in_uop_bits_ppred_busy; // @[issue-slot.scala:69:7] wire [6:0] io_in_uop_bits_stale_pdst_0 = io_in_uop_bits_stale_pdst; // @[issue-slot.scala:69:7] wire io_in_uop_bits_exception_0 = io_in_uop_bits_exception; // @[issue-slot.scala:69:7] wire [63:0] io_in_uop_bits_exc_cause_0 = io_in_uop_bits_exc_cause; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bypassable_0 = io_in_uop_bits_bypassable; // @[issue-slot.scala:69:7] wire [4:0] io_in_uop_bits_mem_cmd_0 = io_in_uop_bits_mem_cmd; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_mem_size_0 = io_in_uop_bits_mem_size; // @[issue-slot.scala:69:7] wire io_in_uop_bits_mem_signed_0 = io_in_uop_bits_mem_signed; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_fence_0 = io_in_uop_bits_is_fence; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_fencei_0 = io_in_uop_bits_is_fencei; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_amo_0 = io_in_uop_bits_is_amo; // @[issue-slot.scala:69:7] wire io_in_uop_bits_uses_ldq_0 = io_in_uop_bits_uses_ldq; // @[issue-slot.scala:69:7] wire io_in_uop_bits_uses_stq_0 = io_in_uop_bits_uses_stq; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_sys_pc2epc_0 = io_in_uop_bits_is_sys_pc2epc; // @[issue-slot.scala:69:7] wire io_in_uop_bits_is_unique_0 = io_in_uop_bits_is_unique; // @[issue-slot.scala:69:7] wire io_in_uop_bits_flush_on_commit_0 = io_in_uop_bits_flush_on_commit; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ldst_is_rs1_0 = io_in_uop_bits_ldst_is_rs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_ldst_0 = io_in_uop_bits_ldst; // 
@[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs1_0 = io_in_uop_bits_lrs1; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs2_0 = io_in_uop_bits_lrs2; // @[issue-slot.scala:69:7] wire [5:0] io_in_uop_bits_lrs3_0 = io_in_uop_bits_lrs3; // @[issue-slot.scala:69:7] wire io_in_uop_bits_ldst_val_0 = io_in_uop_bits_ldst_val; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_dst_rtype_0 = io_in_uop_bits_dst_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_lrs1_rtype_0 = io_in_uop_bits_lrs1_rtype; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_lrs2_rtype_0 = io_in_uop_bits_lrs2_rtype; // @[issue-slot.scala:69:7] wire io_in_uop_bits_frs3_en_0 = io_in_uop_bits_frs3_en; // @[issue-slot.scala:69:7] wire io_in_uop_bits_fp_val_0 = io_in_uop_bits_fp_val; // @[issue-slot.scala:69:7] wire io_in_uop_bits_fp_single_0 = io_in_uop_bits_fp_single; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_pf_if_0 = io_in_uop_bits_xcpt_pf_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_ae_if_0 = io_in_uop_bits_xcpt_ae_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_xcpt_ma_if_0 = io_in_uop_bits_xcpt_ma_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bp_debug_if_0 = io_in_uop_bits_bp_debug_if; // @[issue-slot.scala:69:7] wire io_in_uop_bits_bp_xcpt_if_0 = io_in_uop_bits_bp_xcpt_if; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_debug_fsrc_0 = io_in_uop_bits_debug_fsrc; // @[issue-slot.scala:69:7] wire [1:0] io_in_uop_bits_debug_tsrc_0 = io_in_uop_bits_debug_tsrc; // @[issue-slot.scala:69:7] wire io_ldspec_miss = 1'h0; // @[issue-slot.scala:69:7] wire io_wakeup_ports_0_bits_poisoned = 1'h0; // @[issue-slot.scala:69:7] wire io_wakeup_ports_1_bits_poisoned = 1'h0; // @[issue-slot.scala:69:7] wire io_pred_wakeup_port_valid = 1'h0; // @[issue-slot.scala:69:7] wire io_spec_ld_wakeup_0_valid = 1'h0; // @[issue-slot.scala:69:7] wire io_in_uop_bits_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7] wire io_in_uop_bits_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7] wire io_out_uop_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7] wire io_out_uop_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7] wire io_uop_iw_p1_poisoned = 1'h0; // @[issue-slot.scala:69:7] wire io_uop_iw_p2_poisoned = 1'h0; // @[issue-slot.scala:69:7] wire next_p1_poisoned = 1'h0; // @[issue-slot.scala:99:29] wire next_p2_poisoned = 1'h0; // @[issue-slot.scala:100:29] wire slot_uop_uop_is_rvc = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_fcn_dw = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_load = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_sta = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ctrl_is_std = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_iw_p1_poisoned = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_iw_p2_poisoned = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_br = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_jalr = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_jal = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_sfb = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_edge_inst = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_taken = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs1_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs2_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_prs3_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ppred_busy = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_exception = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bypassable = 
1'h0; // @[consts.scala:269:19] wire slot_uop_uop_mem_signed = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_fence = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_fencei = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_amo = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_uses_ldq = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_uses_stq = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_sys_pc2epc = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_is_unique = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_flush_on_commit = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ldst_is_rs1 = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_ldst_val = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_frs3_en = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_fp_val = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_fp_single = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_pf_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_ae_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_xcpt_ma_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bp_debug_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_uop_bp_xcpt_if = 1'h0; // @[consts.scala:269:19] wire slot_uop_cs_fcn_dw = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_load = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_sta = 1'h0; // @[consts.scala:279:18] wire slot_uop_cs_is_std = 1'h0; // @[consts.scala:279:18] wire _squash_grant_T = 1'h0; // @[issue-slot.scala:261:53] wire squash_grant = 1'h0; // @[issue-slot.scala:261:37] wire [4:0] io_pred_wakeup_port_bits = 5'h0; // @[issue-slot.scala:69:7] wire [4:0] slot_uop_uop_ctrl_op_fcn = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ftq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ldq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_stq_idx = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_ppred = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_uop_mem_cmd = 5'h0; // @[consts.scala:269:19] wire [4:0] slot_uop_cs_op_fcn = 5'h0; // @[consts.scala:279:18] wire [6:0] io_spec_ld_wakeup_0_bits = 7'h0; // @[issue-slot.scala:69:7] wire [6:0] slot_uop_uop_uopc = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_rob_idx = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_pdst = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs1 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs2 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_prs3 = 7'h0; // @[consts.scala:269:19] wire [6:0] slot_uop_uop_stale_pdst = 7'h0; // @[consts.scala:269:19] wire _io_will_be_valid_T_1 = 1'h1; // @[issue-slot.scala:262:51] wire [1:0] slot_uop_uop_ctrl_op1_sel = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_iw_state = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_rxq_idx = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_mem_size = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_lrs1_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_lrs2_rtype = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_debug_fsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_uop_debug_tsrc = 2'h0; // @[consts.scala:269:19] wire [1:0] slot_uop_cs_op1_sel = 2'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_uop_iq_type = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_op2_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_imm_sel = 3'h0; // @[consts.scala:269:19] wire [2:0] slot_uop_uop_ctrl_csr_cmd = 3'h0; // 
@[consts.scala:269:19] wire [2:0] slot_uop_cs_op2_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_cs_imm_sel = 3'h0; // @[consts.scala:279:18] wire [2:0] slot_uop_cs_csr_cmd = 3'h0; // @[consts.scala:279:18] wire [3:0] slot_uop_uop_ctrl_br_type = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_uop_br_tag = 4'h0; // @[consts.scala:269:19] wire [3:0] slot_uop_cs_br_type = 4'h0; // @[consts.scala:279:18] wire [1:0] slot_uop_uop_dst_rtype = 2'h2; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_pc_lob = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_ldst = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs1 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs2 = 6'h0; // @[consts.scala:269:19] wire [5:0] slot_uop_uop_lrs3 = 6'h0; // @[consts.scala:269:19] wire [63:0] slot_uop_uop_exc_cause = 64'h0; // @[consts.scala:269:19] wire [11:0] slot_uop_uop_csr_addr = 12'h0; // @[consts.scala:269:19] wire [19:0] slot_uop_uop_imm_packed = 20'h0; // @[consts.scala:269:19] wire [15:0] slot_uop_uop_br_mask = 16'h0; // @[consts.scala:269:19] wire [9:0] slot_uop_uop_fu_code = 10'h0; // @[consts.scala:269:19] wire [39:0] slot_uop_uop_debug_pc = 40'h0; // @[consts.scala:269:19] wire [31:0] slot_uop_uop_inst = 32'h0; // @[consts.scala:269:19] wire [31:0] slot_uop_uop_debug_inst = 32'h0; // @[consts.scala:269:19] wire _io_valid_T; // @[issue-slot.scala:79:24] wire _io_will_be_valid_T_4; // @[issue-slot.scala:262:32] wire _io_request_hp_T; // @[issue-slot.scala:243:31] wire [6:0] next_uopc; // @[issue-slot.scala:82:29] wire [1:0] next_state; // @[issue-slot.scala:81:29] wire [15:0] next_br_mask; // @[util.scala:85:25] wire _io_out_uop_prs1_busy_T; // @[issue-slot.scala:270:28] wire _io_out_uop_prs2_busy_T; // @[issue-slot.scala:271:28] wire _io_out_uop_prs3_busy_T; // @[issue-slot.scala:272:28] wire _io_out_uop_ppred_busy_T; // @[issue-slot.scala:273:28] wire [1:0] next_lrs1_rtype; // @[issue-slot.scala:83:29] wire [1:0] next_lrs2_rtype; // @[issue-slot.scala:84:29] wire [3:0] io_out_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7] wire io_out_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_uopc_0; // @[issue-slot.scala:69:7] wire [31:0] io_out_uop_inst_0; // @[issue-slot.scala:69:7] wire [31:0] io_out_uop_debug_inst_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_rvc_0; // @[issue-slot.scala:69:7] wire [39:0] io_out_uop_debug_pc_0; // @[issue-slot.scala:69:7] wire [2:0] io_out_uop_iq_type_0; // @[issue-slot.scala:69:7] wire [9:0] io_out_uop_fu_code_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_iw_state_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_br_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_jalr_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_jal_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_sfb_0; // @[issue-slot.scala:69:7] wire [15:0] io_out_uop_br_mask_0; // @[issue-slot.scala:69:7] wire [3:0] io_out_uop_br_tag_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ftq_idx_0; // @[issue-slot.scala:69:7] wire 
io_out_uop_edge_inst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_pc_lob_0; // @[issue-slot.scala:69:7] wire io_out_uop_taken_0; // @[issue-slot.scala:69:7] wire [19:0] io_out_uop_imm_packed_0; // @[issue-slot.scala:69:7] wire [11:0] io_out_uop_csr_addr_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_rob_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ldq_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_stq_idx_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_rxq_idx_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_pdst_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs1_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs2_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_prs3_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_ppred_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs1_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs2_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_prs3_busy_0; // @[issue-slot.scala:69:7] wire io_out_uop_ppred_busy_0; // @[issue-slot.scala:69:7] wire [6:0] io_out_uop_stale_pdst_0; // @[issue-slot.scala:69:7] wire io_out_uop_exception_0; // @[issue-slot.scala:69:7] wire [63:0] io_out_uop_exc_cause_0; // @[issue-slot.scala:69:7] wire io_out_uop_bypassable_0; // @[issue-slot.scala:69:7] wire [4:0] io_out_uop_mem_cmd_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_mem_size_0; // @[issue-slot.scala:69:7] wire io_out_uop_mem_signed_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_fence_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_fencei_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_amo_0; // @[issue-slot.scala:69:7] wire io_out_uop_uses_ldq_0; // @[issue-slot.scala:69:7] wire io_out_uop_uses_stq_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7] wire io_out_uop_is_unique_0; // @[issue-slot.scala:69:7] wire io_out_uop_flush_on_commit_0; // @[issue-slot.scala:69:7] wire io_out_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_ldst_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_out_uop_lrs3_0; // @[issue-slot.scala:69:7] wire io_out_uop_ldst_val_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_dst_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7] wire io_out_uop_frs3_en_0; // @[issue-slot.scala:69:7] wire io_out_uop_fp_val_0; // @[issue-slot.scala:69:7] wire io_out_uop_fp_single_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_xcpt_ma_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_bp_debug_if_0; // @[issue-slot.scala:69:7] wire io_out_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_debug_fsrc_0; // @[issue-slot.scala:69:7] wire [1:0] io_out_uop_debug_tsrc_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_ctrl_br_type_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_ctrl_op1_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_op2_sel_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_imm_sel_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ctrl_op_fcn_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_fcn_dw_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_ctrl_csr_cmd_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_load_0; // @[issue-slot.scala:69:7] wire 
io_uop_ctrl_is_sta_0; // @[issue-slot.scala:69:7] wire io_uop_ctrl_is_std_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_uopc_0; // @[issue-slot.scala:69:7] wire [31:0] io_uop_inst_0; // @[issue-slot.scala:69:7] wire [31:0] io_uop_debug_inst_0; // @[issue-slot.scala:69:7] wire io_uop_is_rvc_0; // @[issue-slot.scala:69:7] wire [39:0] io_uop_debug_pc_0; // @[issue-slot.scala:69:7] wire [2:0] io_uop_iq_type_0; // @[issue-slot.scala:69:7] wire [9:0] io_uop_fu_code_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_iw_state_0; // @[issue-slot.scala:69:7] wire io_uop_is_br_0; // @[issue-slot.scala:69:7] wire io_uop_is_jalr_0; // @[issue-slot.scala:69:7] wire io_uop_is_jal_0; // @[issue-slot.scala:69:7] wire io_uop_is_sfb_0; // @[issue-slot.scala:69:7] wire [15:0] io_uop_br_mask_0; // @[issue-slot.scala:69:7] wire [3:0] io_uop_br_tag_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ftq_idx_0; // @[issue-slot.scala:69:7] wire io_uop_edge_inst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_pc_lob_0; // @[issue-slot.scala:69:7] wire io_uop_taken_0; // @[issue-slot.scala:69:7] wire [19:0] io_uop_imm_packed_0; // @[issue-slot.scala:69:7] wire [11:0] io_uop_csr_addr_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_rob_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ldq_idx_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_stq_idx_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_rxq_idx_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_pdst_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs1_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs2_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_prs3_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_ppred_0; // @[issue-slot.scala:69:7] wire io_uop_prs1_busy_0; // @[issue-slot.scala:69:7] wire io_uop_prs2_busy_0; // @[issue-slot.scala:69:7] wire io_uop_prs3_busy_0; // @[issue-slot.scala:69:7] wire io_uop_ppred_busy_0; // @[issue-slot.scala:69:7] wire [6:0] io_uop_stale_pdst_0; // @[issue-slot.scala:69:7] wire io_uop_exception_0; // @[issue-slot.scala:69:7] wire [63:0] io_uop_exc_cause_0; // @[issue-slot.scala:69:7] wire io_uop_bypassable_0; // @[issue-slot.scala:69:7] wire [4:0] io_uop_mem_cmd_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_mem_size_0; // @[issue-slot.scala:69:7] wire io_uop_mem_signed_0; // @[issue-slot.scala:69:7] wire io_uop_is_fence_0; // @[issue-slot.scala:69:7] wire io_uop_is_fencei_0; // @[issue-slot.scala:69:7] wire io_uop_is_amo_0; // @[issue-slot.scala:69:7] wire io_uop_uses_ldq_0; // @[issue-slot.scala:69:7] wire io_uop_uses_stq_0; // @[issue-slot.scala:69:7] wire io_uop_is_sys_pc2epc_0; // @[issue-slot.scala:69:7] wire io_uop_is_unique_0; // @[issue-slot.scala:69:7] wire io_uop_flush_on_commit_0; // @[issue-slot.scala:69:7] wire io_uop_ldst_is_rs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_ldst_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs1_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs2_0; // @[issue-slot.scala:69:7] wire [5:0] io_uop_lrs3_0; // @[issue-slot.scala:69:7] wire io_uop_ldst_val_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_dst_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_lrs1_rtype_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_lrs2_rtype_0; // @[issue-slot.scala:69:7] wire io_uop_frs3_en_0; // @[issue-slot.scala:69:7] wire io_uop_fp_val_0; // @[issue-slot.scala:69:7] wire io_uop_fp_single_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_pf_if_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_ae_if_0; // @[issue-slot.scala:69:7] wire io_uop_xcpt_ma_if_0; // 
@[issue-slot.scala:69:7] wire io_uop_bp_debug_if_0; // @[issue-slot.scala:69:7] wire io_uop_bp_xcpt_if_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_debug_fsrc_0; // @[issue-slot.scala:69:7] wire [1:0] io_uop_debug_tsrc_0; // @[issue-slot.scala:69:7] wire io_debug_p1_0; // @[issue-slot.scala:69:7] wire io_debug_p2_0; // @[issue-slot.scala:69:7] wire io_debug_p3_0; // @[issue-slot.scala:69:7] wire io_debug_ppred_0; // @[issue-slot.scala:69:7] wire [1:0] io_debug_state_0; // @[issue-slot.scala:69:7] wire io_valid_0; // @[issue-slot.scala:69:7] wire io_will_be_valid_0; // @[issue-slot.scala:69:7] wire io_request_0; // @[issue-slot.scala:69:7] wire io_request_hp_0; // @[issue-slot.scala:69:7] assign io_out_uop_iw_state_0 = next_state; // @[issue-slot.scala:69:7, :81:29] assign io_out_uop_uopc_0 = next_uopc; // @[issue-slot.scala:69:7, :82:29] assign io_out_uop_lrs1_rtype_0 = next_lrs1_rtype; // @[issue-slot.scala:69:7, :83:29] assign io_out_uop_lrs2_rtype_0 = next_lrs2_rtype; // @[issue-slot.scala:69:7, :84:29] reg [1:0] state; // @[issue-slot.scala:86:22] assign io_debug_state_0 = state; // @[issue-slot.scala:69:7, :86:22] reg p1; // @[issue-slot.scala:87:22] assign io_debug_p1_0 = p1; // @[issue-slot.scala:69:7, :87:22] wire next_p1 = p1; // @[issue-slot.scala:87:22, :163:25] reg p2; // @[issue-slot.scala:88:22] assign io_debug_p2_0 = p2; // @[issue-slot.scala:69:7, :88:22] wire next_p2 = p2; // @[issue-slot.scala:88:22, :164:25] reg p3; // @[issue-slot.scala:89:22] assign io_debug_p3_0 = p3; // @[issue-slot.scala:69:7, :89:22] wire next_p3 = p3; // @[issue-slot.scala:89:22, :165:25] reg ppred; // @[issue-slot.scala:90:22] assign io_debug_ppred_0 = ppred; // @[issue-slot.scala:69:7, :90:22] wire next_ppred = ppred; // @[issue-slot.scala:90:22, :166:28] reg [6:0] slot_uop_uopc; // @[issue-slot.scala:102:25] reg [31:0] slot_uop_inst; // @[issue-slot.scala:102:25] assign io_out_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_inst_0 = slot_uop_inst; // @[issue-slot.scala:69:7, :102:25] reg [31:0] slot_uop_debug_inst; // @[issue-slot.scala:102:25] assign io_out_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_inst_0 = slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_rvc; // @[issue-slot.scala:102:25] assign io_out_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_rvc_0 = slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25] reg [39:0] slot_uop_debug_pc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_pc_0 = slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_iq_type; // @[issue-slot.scala:102:25] assign io_out_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25] assign io_uop_iq_type_0 = slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25] reg [9:0] slot_uop_fu_code; // @[issue-slot.scala:102:25] assign io_out_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fu_code_0 = slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_ctrl_br_type; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_br_type_0 = slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_ctrl_op1_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // 
@[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op1_sel_0 = slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_op2_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op2_sel_0 = slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_imm_sel; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_imm_sel_0 = slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ctrl_op_fcn; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_op_fcn_0 = slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_fcn_dw_0 = slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25] reg [2:0] slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_csr_cmd_0 = slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_load; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_load_0 = slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_sta; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_sta_0 = slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ctrl_is_std; // @[issue-slot.scala:102:25] assign io_out_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ctrl_is_std_0 = slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_iw_state; // @[issue-slot.scala:102:25] assign io_uop_iw_state_0 = slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_iw_p1_poisoned; // @[issue-slot.scala:102:25] reg slot_uop_iw_p2_poisoned; // @[issue-slot.scala:102:25] reg slot_uop_is_br; // @[issue-slot.scala:102:25] assign io_out_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_br_0 = slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_jalr; // @[issue-slot.scala:102:25] assign io_out_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_jalr_0 = slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_jal; // @[issue-slot.scala:102:25] assign io_out_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_jal_0 = slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_sfb; // @[issue-slot.scala:102:25] assign io_out_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_sfb_0 = slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25] reg [15:0] slot_uop_br_mask; // @[issue-slot.scala:102:25] assign io_uop_br_mask_0 = slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25] reg [3:0] slot_uop_br_tag; // @[issue-slot.scala:102:25] assign io_out_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25] assign io_uop_br_tag_0 = slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25] reg [4:0] 
slot_uop_ftq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ftq_idx_0 = slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_edge_inst; // @[issue-slot.scala:102:25] assign io_out_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_edge_inst_0 = slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_pc_lob; // @[issue-slot.scala:102:25] assign io_out_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25] assign io_uop_pc_lob_0 = slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_taken; // @[issue-slot.scala:102:25] assign io_out_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25] assign io_uop_taken_0 = slot_uop_taken; // @[issue-slot.scala:69:7, :102:25] reg [19:0] slot_uop_imm_packed; // @[issue-slot.scala:102:25] assign io_out_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25] assign io_uop_imm_packed_0 = slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25] reg [11:0] slot_uop_csr_addr; // @[issue-slot.scala:102:25] assign io_out_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25] assign io_uop_csr_addr_0 = slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_rob_idx; // @[issue-slot.scala:102:25] assign io_out_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_rob_idx_0 = slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ldq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldq_idx_0 = slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_stq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_stq_idx_0 = slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_rxq_idx; // @[issue-slot.scala:102:25] assign io_out_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25] assign io_uop_rxq_idx_0 = slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_pdst; // @[issue-slot.scala:102:25] assign io_out_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_pdst_0 = slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_prs1; // @[issue-slot.scala:102:25] assign io_out_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs1_0 = slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_prs2; // @[issue-slot.scala:102:25] assign io_out_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs2_0 = slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_prs3; // @[issue-slot.scala:102:25] assign io_out_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25] assign io_uop_prs3_0 = slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_ppred; // @[issue-slot.scala:102:25] assign io_out_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ppred_0 = slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_prs1_busy; // @[issue-slot.scala:102:25] assign io_uop_prs1_busy_0 = slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_prs2_busy; // @[issue-slot.scala:102:25] assign io_uop_prs2_busy_0 = slot_uop_prs2_busy; // @[issue-slot.scala:69:7, 
:102:25] reg slot_uop_prs3_busy; // @[issue-slot.scala:102:25] assign io_uop_prs3_busy_0 = slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ppred_busy; // @[issue-slot.scala:102:25] assign io_uop_ppred_busy_0 = slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25] reg [6:0] slot_uop_stale_pdst; // @[issue-slot.scala:102:25] assign io_out_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_stale_pdst_0 = slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_exception; // @[issue-slot.scala:102:25] assign io_out_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25] assign io_uop_exception_0 = slot_uop_exception; // @[issue-slot.scala:69:7, :102:25] reg [63:0] slot_uop_exc_cause; // @[issue-slot.scala:102:25] assign io_out_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25] assign io_uop_exc_cause_0 = slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_bypassable; // @[issue-slot.scala:102:25] assign io_out_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bypassable_0 = slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25] reg [4:0] slot_uop_mem_cmd; // @[issue-slot.scala:102:25] assign io_out_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_cmd_0 = slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_mem_size; // @[issue-slot.scala:102:25] assign io_out_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_size_0 = slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_mem_signed; // @[issue-slot.scala:102:25] assign io_out_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25] assign io_uop_mem_signed_0 = slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_fence; // @[issue-slot.scala:102:25] assign io_out_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_fence_0 = slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_fencei; // @[issue-slot.scala:102:25] assign io_out_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_fencei_0 = slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_amo; // @[issue-slot.scala:102:25] assign io_out_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_amo_0 = slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_uses_ldq; // @[issue-slot.scala:102:25] assign io_out_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25] assign io_uop_uses_ldq_0 = slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_uses_stq; // @[issue-slot.scala:102:25] assign io_out_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25] assign io_uop_uses_stq_0 = slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_sys_pc2epc; // @[issue-slot.scala:102:25] assign io_out_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_sys_pc2epc_0 = slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_is_unique; // @[issue-slot.scala:102:25] assign io_out_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25] assign io_uop_is_unique_0 = slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_flush_on_commit; // 
@[issue-slot.scala:102:25] assign io_out_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25] assign io_uop_flush_on_commit_0 = slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ldst_is_rs1; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_is_rs1_0 = slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_ldst; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_0 = slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs1; // @[issue-slot.scala:102:25] assign io_out_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs1_0 = slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs2; // @[issue-slot.scala:102:25] assign io_out_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs2_0 = slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25] reg [5:0] slot_uop_lrs3; // @[issue-slot.scala:102:25] assign io_out_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25] assign io_uop_lrs3_0 = slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_ldst_val; // @[issue-slot.scala:102:25] assign io_out_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25] assign io_uop_ldst_val_0 = slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_dst_rtype; // @[issue-slot.scala:102:25] assign io_out_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25] assign io_uop_dst_rtype_0 = slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_lrs1_rtype; // @[issue-slot.scala:102:25] reg [1:0] slot_uop_lrs2_rtype; // @[issue-slot.scala:102:25] reg slot_uop_frs3_en; // @[issue-slot.scala:102:25] assign io_out_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25] assign io_uop_frs3_en_0 = slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_fp_val; // @[issue-slot.scala:102:25] assign io_out_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fp_val_0 = slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_fp_single; // @[issue-slot.scala:102:25] assign io_out_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25] assign io_uop_fp_single_0 = slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_pf_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_pf_if_0 = slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_ae_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_ae_if_0 = slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_xcpt_ma_if; // @[issue-slot.scala:102:25] assign io_out_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_xcpt_ma_if_0 = slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_bp_debug_if; // @[issue-slot.scala:102:25] assign io_out_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bp_debug_if_0 = slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25] reg slot_uop_bp_xcpt_if; // @[issue-slot.scala:102:25] assign 
io_out_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25] assign io_uop_bp_xcpt_if_0 = slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_debug_fsrc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_fsrc_0 = slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25] reg [1:0] slot_uop_debug_tsrc; // @[issue-slot.scala:102:25] assign io_out_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25] assign io_uop_debug_tsrc_0 = slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25] wire [6:0] next_uop_uopc = io_in_uop_valid_0 ? io_in_uop_bits_uopc_0 : slot_uop_uopc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [31:0] next_uop_inst = io_in_uop_valid_0 ? io_in_uop_bits_inst_0 : slot_uop_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [31:0] next_uop_debug_inst = io_in_uop_valid_0 ? io_in_uop_bits_debug_inst_0 : slot_uop_debug_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_rvc = io_in_uop_valid_0 ? io_in_uop_bits_is_rvc_0 : slot_uop_is_rvc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [39:0] next_uop_debug_pc = io_in_uop_valid_0 ? io_in_uop_bits_debug_pc_0 : slot_uop_debug_pc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_iq_type = io_in_uop_valid_0 ? io_in_uop_bits_iq_type_0 : slot_uop_iq_type; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [9:0] next_uop_fu_code = io_in_uop_valid_0 ? io_in_uop_bits_fu_code_0 : slot_uop_fu_code; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [3:0] next_uop_ctrl_br_type = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_br_type_0 : slot_uop_ctrl_br_type; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_ctrl_op1_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op1_sel_0 : slot_uop_ctrl_op1_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_op2_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op2_sel_0 : slot_uop_ctrl_op2_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_imm_sel = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_imm_sel_0 : slot_uop_ctrl_imm_sel; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ctrl_op_fcn = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_op_fcn_0 : slot_uop_ctrl_op_fcn; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_fcn_dw = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_fcn_dw_0 : slot_uop_ctrl_fcn_dw; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [2:0] next_uop_ctrl_csr_cmd = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_csr_cmd_0 : slot_uop_ctrl_csr_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_load = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_load_0 : slot_uop_ctrl_is_load; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_sta = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_sta_0 : slot_uop_ctrl_is_sta; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ctrl_is_std = io_in_uop_valid_0 ? io_in_uop_bits_ctrl_is_std_0 : slot_uop_ctrl_is_std; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_iw_state = io_in_uop_valid_0 ? 
io_in_uop_bits_iw_state_0 : slot_uop_iw_state; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_iw_p1_poisoned = ~io_in_uop_valid_0 & slot_uop_iw_p1_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_iw_p2_poisoned = ~io_in_uop_valid_0 & slot_uop_iw_p2_poisoned; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_br = io_in_uop_valid_0 ? io_in_uop_bits_is_br_0 : slot_uop_is_br; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_jalr = io_in_uop_valid_0 ? io_in_uop_bits_is_jalr_0 : slot_uop_is_jalr; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_jal = io_in_uop_valid_0 ? io_in_uop_bits_is_jal_0 : slot_uop_is_jal; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_sfb = io_in_uop_valid_0 ? io_in_uop_bits_is_sfb_0 : slot_uop_is_sfb; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [15:0] next_uop_br_mask = io_in_uop_valid_0 ? io_in_uop_bits_br_mask_0 : slot_uop_br_mask; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [3:0] next_uop_br_tag = io_in_uop_valid_0 ? io_in_uop_bits_br_tag_0 : slot_uop_br_tag; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ftq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ftq_idx_0 : slot_uop_ftq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_edge_inst = io_in_uop_valid_0 ? io_in_uop_bits_edge_inst_0 : slot_uop_edge_inst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_pc_lob = io_in_uop_valid_0 ? io_in_uop_bits_pc_lob_0 : slot_uop_pc_lob; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_taken = io_in_uop_valid_0 ? io_in_uop_bits_taken_0 : slot_uop_taken; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [19:0] next_uop_imm_packed = io_in_uop_valid_0 ? io_in_uop_bits_imm_packed_0 : slot_uop_imm_packed; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [11:0] next_uop_csr_addr = io_in_uop_valid_0 ? io_in_uop_bits_csr_addr_0 : slot_uop_csr_addr; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_rob_idx = io_in_uop_valid_0 ? io_in_uop_bits_rob_idx_0 : slot_uop_rob_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ldq_idx = io_in_uop_valid_0 ? io_in_uop_bits_ldq_idx_0 : slot_uop_ldq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_stq_idx = io_in_uop_valid_0 ? io_in_uop_bits_stq_idx_0 : slot_uop_stq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_rxq_idx = io_in_uop_valid_0 ? io_in_uop_bits_rxq_idx_0 : slot_uop_rxq_idx; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_pdst = io_in_uop_valid_0 ? io_in_uop_bits_pdst_0 : slot_uop_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_prs1 = io_in_uop_valid_0 ? io_in_uop_bits_prs1_0 : slot_uop_prs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_prs2 = io_in_uop_valid_0 ? io_in_uop_bits_prs2_0 : slot_uop_prs2; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_prs3 = io_in_uop_valid_0 ? io_in_uop_bits_prs3_0 : slot_uop_prs3; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_ppred = io_in_uop_valid_0 ? io_in_uop_bits_ppred_0 : slot_uop_ppred; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs1_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs1_busy_0 : slot_uop_prs1_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs2_busy = io_in_uop_valid_0 ? io_in_uop_bits_prs2_busy_0 : slot_uop_prs2_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_prs3_busy = io_in_uop_valid_0 ? 
io_in_uop_bits_prs3_busy_0 : slot_uop_prs3_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ppred_busy = io_in_uop_valid_0 ? io_in_uop_bits_ppred_busy_0 : slot_uop_ppred_busy; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [6:0] next_uop_stale_pdst = io_in_uop_valid_0 ? io_in_uop_bits_stale_pdst_0 : slot_uop_stale_pdst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_exception = io_in_uop_valid_0 ? io_in_uop_bits_exception_0 : slot_uop_exception; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [63:0] next_uop_exc_cause = io_in_uop_valid_0 ? io_in_uop_bits_exc_cause_0 : slot_uop_exc_cause; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bypassable = io_in_uop_valid_0 ? io_in_uop_bits_bypassable_0 : slot_uop_bypassable; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [4:0] next_uop_mem_cmd = io_in_uop_valid_0 ? io_in_uop_bits_mem_cmd_0 : slot_uop_mem_cmd; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_mem_size = io_in_uop_valid_0 ? io_in_uop_bits_mem_size_0 : slot_uop_mem_size; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_mem_signed = io_in_uop_valid_0 ? io_in_uop_bits_mem_signed_0 : slot_uop_mem_signed; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_fence = io_in_uop_valid_0 ? io_in_uop_bits_is_fence_0 : slot_uop_is_fence; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_fencei = io_in_uop_valid_0 ? io_in_uop_bits_is_fencei_0 : slot_uop_is_fencei; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_amo = io_in_uop_valid_0 ? io_in_uop_bits_is_amo_0 : slot_uop_is_amo; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_uses_ldq = io_in_uop_valid_0 ? io_in_uop_bits_uses_ldq_0 : slot_uop_uses_ldq; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_uses_stq = io_in_uop_valid_0 ? io_in_uop_bits_uses_stq_0 : slot_uop_uses_stq; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_sys_pc2epc = io_in_uop_valid_0 ? io_in_uop_bits_is_sys_pc2epc_0 : slot_uop_is_sys_pc2epc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_is_unique = io_in_uop_valid_0 ? io_in_uop_bits_is_unique_0 : slot_uop_is_unique; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_flush_on_commit = io_in_uop_valid_0 ? io_in_uop_bits_flush_on_commit_0 : slot_uop_flush_on_commit; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ldst_is_rs1 = io_in_uop_valid_0 ? io_in_uop_bits_ldst_is_rs1_0 : slot_uop_ldst_is_rs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_ldst = io_in_uop_valid_0 ? io_in_uop_bits_ldst_0 : slot_uop_ldst; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs1 = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_0 : slot_uop_lrs1; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs2 = io_in_uop_valid_0 ? io_in_uop_bits_lrs2_0 : slot_uop_lrs2; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [5:0] next_uop_lrs3 = io_in_uop_valid_0 ? io_in_uop_bits_lrs3_0 : slot_uop_lrs3; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_ldst_val = io_in_uop_valid_0 ? io_in_uop_bits_ldst_val_0 : slot_uop_ldst_val; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_dst_rtype = io_in_uop_valid_0 ? io_in_uop_bits_dst_rtype_0 : slot_uop_dst_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_lrs1_rtype = io_in_uop_valid_0 ? io_in_uop_bits_lrs1_rtype_0 : slot_uop_lrs1_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_lrs2_rtype = io_in_uop_valid_0 ? 
io_in_uop_bits_lrs2_rtype_0 : slot_uop_lrs2_rtype; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_frs3_en = io_in_uop_valid_0 ? io_in_uop_bits_frs3_en_0 : slot_uop_frs3_en; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_fp_val = io_in_uop_valid_0 ? io_in_uop_bits_fp_val_0 : slot_uop_fp_val; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_fp_single = io_in_uop_valid_0 ? io_in_uop_bits_fp_single_0 : slot_uop_fp_single; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_pf_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_pf_if_0 : slot_uop_xcpt_pf_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_ae_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ae_if_0 : slot_uop_xcpt_ae_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_xcpt_ma_if = io_in_uop_valid_0 ? io_in_uop_bits_xcpt_ma_if_0 : slot_uop_xcpt_ma_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bp_debug_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_debug_if_0 : slot_uop_bp_debug_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire next_uop_bp_xcpt_if = io_in_uop_valid_0 ? io_in_uop_bits_bp_xcpt_if_0 : slot_uop_bp_xcpt_if; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_debug_fsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_fsrc_0 : slot_uop_debug_fsrc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire [1:0] next_uop_debug_tsrc = io_in_uop_valid_0 ? io_in_uop_bits_debug_tsrc_0 : slot_uop_debug_tsrc; // @[issue-slot.scala:69:7, :102:25, :103:21] wire _T_11 = state == 2'h2; // @[issue-slot.scala:86:22, :134:25] wire _T_7 = io_grant_0 & state == 2'h1 | io_grant_0 & _T_11 & p1 & p2 & ppred; // @[issue-slot.scala:69:7, :86:22, :87:22, :88:22, :90:22, :133:{26,36,52}, :134:{15,25,40,46,52}] wire _T_12 = io_grant_0 & _T_11; // @[issue-slot.scala:69:7, :134:25, :139:25] wire _GEN = io_kill_0 | _T_7; // @[issue-slot.scala:69:7, :102:25, :131:18, :133:52, :134:63, :139:51] wire _GEN_0 = _GEN | ~(_T_12 & p1); // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:62, :142:17, :143:23] assign next_uopc = _GEN_0 ? slot_uop_uopc : 7'h3; // @[issue-slot.scala:82:29, :102:25, :131:18, :134:63, :139:51] assign next_lrs1_rtype = _GEN_0 ? slot_uop_lrs1_rtype : 2'h2; // @[issue-slot.scala:83:29, :102:25, :131:18, :134:63, :139:51] wire _GEN_1 = _GEN | ~_T_12 | p1; // @[issue-slot.scala:87:22, :102:25, :131:18, :134:63, :139:{25,51}, :140:62, :142:17] assign next_lrs2_rtype = _GEN_1 ? slot_uop_lrs2_rtype : 2'h2; // @[issue-slot.scala:84:29, :102:25, :131:18, :134:63, :139:51, :140:62, :142:17]
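The long runs of 'next_uop_* = io_in_uop_valid_0 ? io_in_uop_bits_* : slot_uop_*' wires above all come from a single per-field mux in the Chisel issue-slot source: the slot either captures a newly dispatched micro-op or keeps holding the one already resident. A minimal Chisel sketch of that pattern, using a made-up two-field bundle rather than BOOM's full MicroOp (names here are illustrative only, not the actual IssueSlot code):

import chisel3._
import chisel3.util._

class TinyUop extends Bundle {
  val uopc    = UInt(7.W)
  val br_mask = UInt(16.W)
}

class TinySlot extends Module {
  val io = IO(new Bundle {
    val in_uop = Flipped(Valid(new TinyUop)) // newly dispatched uop, if any
    val uop    = Output(new TinyUop)         // uop currently held by the slot
  })
  val slot_uop = Reg(new TinyUop)
  // Capture the incoming uop when valid, otherwise hold the current one. Elaboration
  // expands this one bundle mux into a separate ?: expression per field, which is why
  // the generated Verilog repeats the same ternary once per micro-op field.
  val next_uop = Mux(io.in_uop.valid, io.in_uop.bits, slot_uop)
  slot_uop := next_uop
  io.uop   := slot_uop
}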
Generate the Verilog code corresponding to the following Chisel files. File UnsafeAXI4ToTL.scala: package ara import chisel3._ import chisel3.util._ import freechips.rocketchip.amba._ import freechips.rocketchip.amba.axi4._ import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.diplomacy._ import freechips.rocketchip.tilelink._ import freechips.rocketchip.util._ class ReorderData(val dataWidth: Int, val respWidth: Int, val userFields: Seq[BundleFieldBase]) extends Bundle { val data = UInt(dataWidth.W) val resp = UInt(respWidth.W) val last = Bool() val user = BundleMap(userFields) } /** Parameters for [[BaseReservableListBuffer]] and all child classes. * * @param numEntries Total number of elements that can be stored in the 'data' RAM * @param numLists Maximum number of linked lists * @param numBeats Maximum number of beats per entry */ case class ReservableListBufferParameters(numEntries: Int, numLists: Int, numBeats: Int) { // Avoid zero-width wires when we call 'log2Ceil' val entryBits = if (numEntries == 1) 1 else log2Ceil(numEntries) val listBits = if (numLists == 1) 1 else log2Ceil(numLists) val beatBits = if (numBeats == 1) 1 else log2Ceil(numBeats) } case class UnsafeAXI4ToTLNode(numTlTxns: Int, wcorrupt: Boolean)(implicit valName: ValName) extends MixedAdapterNode(AXI4Imp, TLImp)( dFn = { case mp => TLMasterPortParameters.v2( masters = mp.masters.zipWithIndex.map { case (m, i) => // Support 'numTlTxns' read requests and 'numTlTxns' write requests at once. val numSourceIds = numTlTxns * 2 TLMasterParameters.v2( name = m.name, sourceId = IdRange(i * numSourceIds, (i + 1) * numSourceIds), nodePath = m.nodePath ) }, echoFields = mp.echoFields, requestFields = AMBAProtField() +: mp.requestFields, responseKeys = mp.responseKeys ) }, uFn = { mp => AXI4SlavePortParameters( slaves = mp.managers.map { m => val maxXfer = TransferSizes(1, mp.beatBytes * (1 << AXI4Parameters.lenBits)) AXI4SlaveParameters( address = m.address, resources = m.resources, regionType = m.regionType, executable = m.executable, nodePath = m.nodePath, supportsWrite = m.supportsPutPartial.intersect(maxXfer), supportsRead = m.supportsGet.intersect(maxXfer), interleavedId = Some(0) // TL2 never interleaves D beats ) }, beatBytes = mp.beatBytes, minLatency = mp.minLatency, responseFields = mp.responseFields, requestKeys = (if (wcorrupt) Seq(AMBACorrupt) else Seq()) ++ mp.requestKeys.filter(_ != AMBAProt) ) } ) class UnsafeAXI4ToTL(numTlTxns: Int, wcorrupt: Boolean)(implicit p: Parameters) extends LazyModule { require(numTlTxns >= 1) require(isPow2(numTlTxns), s"Number of TileLink transactions ($numTlTxns) must be a power of 2") val node = UnsafeAXI4ToTLNode(numTlTxns, wcorrupt) lazy val module = new LazyModuleImp(this) { (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => edgeIn.master.masters.foreach { m => require(m.aligned, "AXI4ToTL requires aligned requests") } val numIds = edgeIn.master.endId val beatBytes = edgeOut.slave.beatBytes val maxTransfer = edgeOut.slave.maxTransfer val maxBeats = maxTransfer / beatBytes // Look for an Error device to redirect bad requests val errorDevs = edgeOut.slave.managers.filter(_.nodePath.last.lazyModule.className == "TLError") require(!errorDevs.isEmpty, "There is no TLError reachable from AXI4ToTL. 
One must be instantiated.") val errorDev = errorDevs.maxBy(_.maxTransfer) val errorDevAddr = errorDev.address.head.base require( errorDev.supportsPutPartial.contains(maxTransfer), s"Error device supports ${errorDev.supportsPutPartial} PutPartial but must support $maxTransfer" ) require( errorDev.supportsGet.contains(maxTransfer), s"Error device supports ${errorDev.supportsGet} Get but must support $maxTransfer" ) // All of the read-response reordering logic. val listBufData = new ReorderData(beatBytes * 8, edgeIn.bundle.respBits, out.d.bits.user.fields) val listBufParams = ReservableListBufferParameters(numTlTxns, numIds, maxBeats) val listBuffer = if (numTlTxns > 1) { Module(new ReservableListBuffer(listBufData, listBufParams)) } else { Module(new PassthroughListBuffer(listBufData, listBufParams)) } // To differentiate between read and write transaction IDs, we will set the MSB of the TileLink 'source' field to // 0 for read requests and 1 for write requests. val isReadSourceBit = 0.U(1.W) val isWriteSourceBit = 1.U(1.W) /* Read request logic */ val rOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val rBytes1 = in.ar.bits.bytes1() val rSize = OH1ToUInt(rBytes1) val rOk = edgeOut.slave.supportsGetSafe(in.ar.bits.addr, rSize) val rId = if (numTlTxns > 1) { Cat(isReadSourceBit, listBuffer.ioReservedIndex) } else { isReadSourceBit } val rAddr = Mux(rOk, in.ar.bits.addr, errorDevAddr.U | in.ar.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Indicates if there are still valid TileLink source IDs left to use. val canIssueR = listBuffer.ioReserve.ready listBuffer.ioReserve.bits := in.ar.bits.id listBuffer.ioReserve.valid := in.ar.valid && rOut.ready in.ar.ready := rOut.ready && canIssueR rOut.valid := in.ar.valid && canIssueR rOut.bits :<= edgeOut.Get(rId, rAddr, rSize)._2 rOut.bits.user :<= in.ar.bits.user rOut.bits.user.lift(AMBAProt).foreach { rProt => rProt.privileged := in.ar.bits.prot(0) rProt.secure := !in.ar.bits.prot(1) rProt.fetch := in.ar.bits.prot(2) rProt.bufferable := in.ar.bits.cache(0) rProt.modifiable := in.ar.bits.cache(1) rProt.readalloc := in.ar.bits.cache(2) rProt.writealloc := in.ar.bits.cache(3) } /* Write request logic */ // Strip off the MSB, which identifies the transaction as read vs write. val strippedResponseSourceId = if (numTlTxns > 1) { out.d.bits.source((out.d.bits.source).getWidth - 2, 0) } else { // When there's only 1 TileLink transaction allowed for read/write, then this field is always 0. 0.U(1.W) } // Track when a write request burst is in progress. val writeBurstBusy = RegInit(false.B) when(in.w.fire) { writeBurstBusy := !in.w.bits.last } val usedWriteIds = RegInit(0.U(numTlTxns.W)) val canIssueW = !usedWriteIds.andR val usedWriteIdsSet = WireDefault(0.U(numTlTxns.W)) val usedWriteIdsClr = WireDefault(0.U(numTlTxns.W)) usedWriteIds := (usedWriteIds & ~usedWriteIdsClr) | usedWriteIdsSet // Since write responses can show up in the middle of a write burst, we need to ensure the write burst ID doesn't // change mid-burst. 
val freeWriteIdOHRaw = Wire(UInt(numTlTxns.W)) val freeWriteIdOH = freeWriteIdOHRaw holdUnless !writeBurstBusy val freeWriteIdIndex = OHToUInt(freeWriteIdOH) freeWriteIdOHRaw := ~(leftOR(~usedWriteIds) << 1) & ~usedWriteIds val wOut = Wire(Decoupled(new TLBundleA(edgeOut.bundle))) val wBytes1 = in.aw.bits.bytes1() val wSize = OH1ToUInt(wBytes1) val wOk = edgeOut.slave.supportsPutPartialSafe(in.aw.bits.addr, wSize) val wId = if (numTlTxns > 1) { Cat(isWriteSourceBit, freeWriteIdIndex) } else { isWriteSourceBit } val wAddr = Mux(wOk, in.aw.bits.addr, errorDevAddr.U | in.aw.bits.addr(log2Ceil(beatBytes) - 1, 0)) // Here, we're taking advantage of the Irrevocable behavior of AXI4 (once 'valid' is asserted it must remain // asserted until the handshake occurs). We will only accept W-channel beats when we have a valid AW beat, but // the AW-channel beat won't fire until the final W-channel beat fires. So, we have stable address/size/strb // bits during a W-channel burst. in.aw.ready := wOut.ready && in.w.valid && in.w.bits.last && canIssueW in.w.ready := wOut.ready && in.aw.valid && canIssueW wOut.valid := in.aw.valid && in.w.valid && canIssueW wOut.bits :<= edgeOut.Put(wId, wAddr, wSize, in.w.bits.data, in.w.bits.strb)._2 in.w.bits.user.lift(AMBACorrupt).foreach { wOut.bits.corrupt := _ } wOut.bits.user :<= in.aw.bits.user wOut.bits.user.lift(AMBAProt).foreach { wProt => wProt.privileged := in.aw.bits.prot(0) wProt.secure := !in.aw.bits.prot(1) wProt.fetch := in.aw.bits.prot(2) wProt.bufferable := in.aw.bits.cache(0) wProt.modifiable := in.aw.bits.cache(1) wProt.readalloc := in.aw.bits.cache(2) wProt.writealloc := in.aw.bits.cache(3) } // Merge the AXI4 read/write requests into the TL-A channel. TLArbiter(TLArbiter.roundRobin)(out.a, (0.U, rOut), (in.aw.bits.len, wOut)) /* Read/write response logic */ val okB = Wire(Irrevocable(new AXI4BundleB(edgeIn.bundle))) val okR = Wire(Irrevocable(new AXI4BundleR(edgeIn.bundle))) val dResp = Mux(out.d.bits.denied || out.d.bits.corrupt, AXI4Parameters.RESP_SLVERR, AXI4Parameters.RESP_OKAY) val dHasData = edgeOut.hasData(out.d.bits) val (_dFirst, dLast, _dDone, dCount) = edgeOut.count(out.d) val dNumBeats1 = edgeOut.numBeats1(out.d.bits) // Handle cases where writeack arrives before write is done val writeEarlyAck = (UIntToOH(strippedResponseSourceId) & usedWriteIds) === 0.U out.d.ready := Mux(dHasData, listBuffer.ioResponse.ready, okB.ready && !writeEarlyAck) listBuffer.ioDataOut.ready := okR.ready okR.valid := listBuffer.ioDataOut.valid okB.valid := out.d.valid && !dHasData && !writeEarlyAck listBuffer.ioResponse.valid := out.d.valid && dHasData listBuffer.ioResponse.bits.index := strippedResponseSourceId listBuffer.ioResponse.bits.data.data := out.d.bits.data listBuffer.ioResponse.bits.data.resp := dResp listBuffer.ioResponse.bits.data.last := dLast listBuffer.ioResponse.bits.data.user :<= out.d.bits.user listBuffer.ioResponse.bits.count := dCount listBuffer.ioResponse.bits.numBeats1 := dNumBeats1 okR.bits.id := listBuffer.ioDataOut.bits.listIndex okR.bits.data := listBuffer.ioDataOut.bits.payload.data okR.bits.resp := listBuffer.ioDataOut.bits.payload.resp okR.bits.last := listBuffer.ioDataOut.bits.payload.last okR.bits.user :<= listBuffer.ioDataOut.bits.payload.user // Upon the final beat in a write request, record a mapping from TileLink source ID to AXI write ID. Upon a write // response, mark the write transaction as complete. 
val writeIdMap = Mem(numTlTxns, UInt(log2Ceil(numIds).W)) val writeResponseId = writeIdMap.read(strippedResponseSourceId) when(wOut.fire) { writeIdMap.write(freeWriteIdIndex, in.aw.bits.id) } when(edgeOut.done(wOut)) { usedWriteIdsSet := freeWriteIdOH } when(okB.fire) { usedWriteIdsClr := UIntToOH(strippedResponseSourceId, numTlTxns) } okB.bits.id := writeResponseId okB.bits.resp := dResp okB.bits.user :<= out.d.bits.user // AXI4 needs irrevocable behaviour in.r <> Queue.irrevocable(okR, 1, flow = true) in.b <> Queue.irrevocable(okB, 1, flow = true) // Unused channels out.b.ready := true.B out.c.valid := false.B out.e.valid := false.B /* Alignment constraints. The AXI4Fragmenter should guarantee all of these constraints. */ def checkRequest[T <: AXI4BundleA](a: IrrevocableIO[T], reqType: String): Unit = { val lReqType = reqType.toLowerCase when(a.valid) { assert(a.bits.len < maxBeats.U, s"$reqType burst length (%d) must be less than $maxBeats", a.bits.len + 1.U) // Narrow transfers and FIXED bursts must be single-beat bursts. when(a.bits.len =/= 0.U) { assert( a.bits.size === log2Ceil(beatBytes).U, s"Narrow $lReqType transfers (%d < $beatBytes bytes) can't be multi-beat bursts (%d beats)", 1.U << a.bits.size, a.bits.len + 1.U ) assert( a.bits.burst =/= AXI4Parameters.BURST_FIXED, s"Fixed $lReqType bursts can't be multi-beat bursts (%d beats)", a.bits.len + 1.U ) } // Furthermore, the transfer size (a.bits.bytes1() + 1.U) must be naturally-aligned to the address (in // particular, during both WRAP and INCR bursts), but this constraint is already checked by TileLink // Monitors. Note that this alignment requirement means that WRAP bursts are identical to INCR bursts. } } checkRequest(in.ar, "Read") checkRequest(in.aw, "Write") } } } object UnsafeAXI4ToTL { def apply(numTlTxns: Int = 1, wcorrupt: Boolean = true)(implicit p: Parameters) = { val axi42tl = LazyModule(new UnsafeAXI4ToTL(numTlTxns, wcorrupt)) axi42tl.node } } /* ReservableListBuffer logic, and associated classes. */ class ResponsePayload[T <: Data](val data: T, val params: ReservableListBufferParameters) extends Bundle { val index = UInt(params.entryBits.W) val count = UInt(params.beatBits.W) val numBeats1 = UInt(params.beatBits.W) } class DataOutPayload[T <: Data](val payload: T, val params: ReservableListBufferParameters) extends Bundle { val listIndex = UInt(params.listBits.W) } /** Abstract base class to unify [[ReservableListBuffer]] and [[PassthroughListBuffer]]. */ abstract class BaseReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends Module { require(params.numEntries > 0) require(params.numLists > 0) val ioReserve = IO(Flipped(Decoupled(UInt(params.listBits.W)))) val ioReservedIndex = IO(Output(UInt(params.entryBits.W))) val ioResponse = IO(Flipped(Decoupled(new ResponsePayload(gen, params)))) val ioDataOut = IO(Decoupled(new DataOutPayload(gen, params))) } /** A modified version of 'ListBuffer' from 'sifive/block-inclusivecache-sifive'. This module forces users to reserve * linked list entries (through the 'ioReserve' port) before writing data into those linked lists (through the * 'ioResponse' port). Each response is tagged to indicate which linked list it is written into. The responses for a * given linked list can come back out-of-order, but they will be read out through the 'ioDataOut' port in-order. 
* * ==Constructor== * @param gen Chisel type of linked list data element * @param params Other parameters * * ==Module IO== * @param ioReserve Index of list to reserve a new element in * @param ioReservedIndex Index of the entry that was reserved in the linked list, valid when 'ioReserve.fire' * @param ioResponse Payload containing response data and linked-list-entry index * @param ioDataOut Payload containing data read from response linked list and linked list index */ class ReservableListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { val valid = RegInit(0.U(params.numLists.W)) val head = Mem(params.numLists, UInt(params.entryBits.W)) val tail = Mem(params.numLists, UInt(params.entryBits.W)) val used = RegInit(0.U(params.numEntries.W)) val next = Mem(params.numEntries, UInt(params.entryBits.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val dataMems = Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) } val dataIsPresent = RegInit(0.U(params.numEntries.W)) val beats = Mem(params.numEntries, UInt(params.beatBits.W)) // The 'data' SRAM should be single-ported (read-or-write), since dual-ported SRAMs are significantly slower. val dataMemReadEnable = WireDefault(false.B) val dataMemWriteEnable = WireDefault(false.B) assert(!(dataMemReadEnable && dataMemWriteEnable)) // 'freeOH' has a single bit set, which is the least-significant bit that is cleared in 'used'. So, it's the // lowest-index entry in the 'data' RAM which is free. val freeOH = Wire(UInt(params.numEntries.W)) val freeIndex = OHToUInt(freeOH) freeOH := ~(leftOR(~used) << 1) & ~used ioReservedIndex := freeIndex val validSet = WireDefault(0.U(params.numLists.W)) val validClr = WireDefault(0.U(params.numLists.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) val dataIsPresentSet = WireDefault(0.U(params.numEntries.W)) val dataIsPresentClr = WireDefault(0.U(params.numEntries.W)) valid := (valid & ~validClr) | validSet used := (used & ~usedClr) | usedSet dataIsPresent := (dataIsPresent & ~dataIsPresentClr) | dataIsPresentSet /* Reservation logic signals */ val reserveTail = Wire(UInt(params.entryBits.W)) val reserveIsValid = Wire(Bool()) /* Response logic signals */ val responseIndex = Wire(UInt(params.entryBits.W)) val responseListIndex = Wire(UInt(params.listBits.W)) val responseHead = Wire(UInt(params.entryBits.W)) val responseTail = Wire(UInt(params.entryBits.W)) val nextResponseHead = Wire(UInt(params.entryBits.W)) val nextDataIsPresent = Wire(Bool()) val isResponseInOrder = Wire(Bool()) val isEndOfList = Wire(Bool()) val isLastBeat = Wire(Bool()) val isLastResponseBeat = Wire(Bool()) val isLastUnwindBeat = Wire(Bool()) /* Reservation logic */ reserveTail := tail.read(ioReserve.bits) reserveIsValid := valid(ioReserve.bits) ioReserve.ready := !used.andR // When we want to append-to and destroy the same linked list on the same cycle, we need to take special care that we // actually start a new list, rather than appending to a list that's about to disappear. 
val reserveResponseSameList = ioReserve.bits === responseListIndex val appendToAndDestroyList = ioReserve.fire && ioDataOut.fire && reserveResponseSameList && isEndOfList && isLastBeat when(ioReserve.fire) { validSet := UIntToOH(ioReserve.bits, params.numLists) usedSet := freeOH when(reserveIsValid && !appendToAndDestroyList) { next.write(reserveTail, freeIndex) }.otherwise { head.write(ioReserve.bits, freeIndex) } tail.write(ioReserve.bits, freeIndex) map.write(freeIndex, ioReserve.bits) } /* Response logic */ // The majority of the response logic (reading from and writing to the various RAMs) is common between the // response-from-IO case (ioResponse.fire) and the response-from-unwind case (unwindDataIsValid). // The read from the 'next' RAM should be performed at the address given by 'responseHead'. However, we only use the // 'nextResponseHead' signal when 'isResponseInOrder' is asserted (both in the response-from-IO and // response-from-unwind cases), which implies that 'responseHead' equals 'responseIndex'. 'responseHead' comes after // two back-to-back RAM reads, so indexing into the 'next' RAM with 'responseIndex' is much quicker. responseHead := head.read(responseListIndex) responseTail := tail.read(responseListIndex) nextResponseHead := next.read(responseIndex) nextDataIsPresent := dataIsPresent(nextResponseHead) // Note that when 'isEndOfList' is asserted, 'nextResponseHead' (and therefore 'nextDataIsPresent') is invalid, since // there isn't a next element in the linked list. isResponseInOrder := responseHead === responseIndex isEndOfList := responseHead === responseTail isLastResponseBeat := ioResponse.bits.count === ioResponse.bits.numBeats1 // When a response's last beat is sent to the output channel, mark it as completed. This can happen in two // situations: // 1. We receive an in-order response, which travels straight from 'ioResponse' to 'ioDataOut'. The 'data' SRAM // reservation was never needed. // 2. An entry is read out of the 'data' SRAM (within the unwind FSM). when(ioDataOut.fire && isLastBeat) { // Mark the reservation as no-longer-used. usedClr := UIntToOH(responseIndex, params.numEntries) // If the response is in-order, then we're popping an element from this linked list. when(isEndOfList) { // Once we pop the last element from a linked list, mark it as no-longer-present. validClr := UIntToOH(responseListIndex, params.numLists) }.otherwise { // Move the linked list's head pointer to the new head pointer. head.write(responseListIndex, nextResponseHead) } } // If we get an out-of-order response, then stash it in the 'data' SRAM for later unwinding. when(ioResponse.fire && !isResponseInOrder) { dataMemWriteEnable := true.B when(isLastResponseBeat) { dataIsPresentSet := UIntToOH(ioResponse.bits.index, params.numEntries) beats.write(ioResponse.bits.index, ioResponse.bits.numBeats1) } } // Use the 'ioResponse.bits.count' index (AKA the beat number) to select which 'data' SRAM to write to. 
val responseCountOH = UIntToOH(ioResponse.bits.count, params.numBeats) (responseCountOH.asBools zip dataMems) foreach { case (select, seqMem) => when(select && dataMemWriteEnable) { seqMem.write(ioResponse.bits.index, ioResponse.bits.data) } } /* Response unwind logic */ // Unwind FSM state definitions val sIdle :: sUnwinding :: Nil = Enum(2) val unwindState = RegInit(sIdle) val busyUnwinding = unwindState === sUnwinding val startUnwind = Wire(Bool()) val stopUnwind = Wire(Bool()) when(startUnwind) { unwindState := sUnwinding }.elsewhen(stopUnwind) { unwindState := sIdle } assert(!(startUnwind && stopUnwind)) // Start the unwind FSM when there is an old out-of-order response stored in the 'data' SRAM that is now about to // become the next in-order response. As noted previously, when 'isEndOfList' is asserted, 'nextDataIsPresent' is // invalid. // // Note that since an in-order response from 'ioResponse' to 'ioDataOut' starts the unwind FSM, we don't have to // worry about overwriting the 'data' SRAM's output when we start the unwind FSM. startUnwind := ioResponse.fire && isResponseInOrder && isLastResponseBeat && !isEndOfList && nextDataIsPresent // Stop the unwind FSM when the output channel consumes the final beat of an element from the unwind FSM, and one of // two things happens: // 1. We're still waiting for the next in-order response for this list (!nextDataIsPresent) // 2. There are no more outstanding responses in this list (isEndOfList) // // Including 'busyUnwinding' ensures this is a single-cycle pulse, and it never fires while in-order transactions are // passing from 'ioResponse' to 'ioDataOut'. stopUnwind := busyUnwinding && ioDataOut.fire && isLastUnwindBeat && (!nextDataIsPresent || isEndOfList) val isUnwindBurstOver = Wire(Bool()) val startNewBurst = startUnwind || (isUnwindBurstOver && dataMemReadEnable) // Track the number of beats left to unwind for each list entry. At the start of a new burst, we flop the number of // beats in this burst (minus 1) into 'unwindBeats1', and we reset the 'beatCounter' counter. With each beat, we // increment 'beatCounter' until it reaches 'unwindBeats1'. val unwindBeats1 = Reg(UInt(params.beatBits.W)) val nextBeatCounter = Wire(UInt(params.beatBits.W)) val beatCounter = RegNext(nextBeatCounter) isUnwindBurstOver := beatCounter === unwindBeats1 when(startNewBurst) { unwindBeats1 := beats.read(nextResponseHead) nextBeatCounter := 0.U }.elsewhen(dataMemReadEnable) { nextBeatCounter := beatCounter + 1.U }.otherwise { nextBeatCounter := beatCounter } // When unwinding, feed the next linked-list head pointer (read out of the 'next' RAM) back so we can unwind the next // entry in this linked list. Only update the pointer when we're actually moving to the next 'data' SRAM entry (which // happens at the start of reading a new stored burst). val unwindResponseIndex = RegEnable(nextResponseHead, startNewBurst) responseIndex := Mux(busyUnwinding, unwindResponseIndex, ioResponse.bits.index) // Hold 'nextResponseHead' static while we're in the middle of unwinding a multi-beat burst entry. We don't want the // SRAM read address to shift while reading beats from a burst. Note that this is identical to 'nextResponseHead // holdUnless startNewBurst', but 'unwindResponseIndex' already implements the 'RegEnable' signal in 'holdUnless'. val unwindReadAddress = Mux(startNewBurst, nextResponseHead, unwindResponseIndex) // The 'data' SRAM's output is valid if we read from the SRAM on the previous cycle. 
The SRAM's output stays valid // until it is consumed by the output channel (and if we don't read from the SRAM again on that same cycle). val unwindDataIsValid = RegInit(false.B) when(dataMemReadEnable) { unwindDataIsValid := true.B }.elsewhen(ioDataOut.fire) { unwindDataIsValid := false.B } isLastUnwindBeat := isUnwindBurstOver && unwindDataIsValid // Indicates if this is the last beat for both 'ioResponse'-to-'ioDataOut' and unwind-to-'ioDataOut' beats. isLastBeat := Mux(busyUnwinding, isLastUnwindBeat, isLastResponseBeat) // Select which SRAM to read from based on the beat counter. val dataOutputVec = Wire(Vec(params.numBeats, gen)) val nextBeatCounterOH = UIntToOH(nextBeatCounter, params.numBeats) (nextBeatCounterOH.asBools zip dataMems).zipWithIndex foreach { case ((select, seqMem), i) => dataOutputVec(i) := seqMem.read(unwindReadAddress, select && dataMemReadEnable) } // Select the current 'data' SRAM output beat, and save the output in a register in case we're being back-pressured // by 'ioDataOut'. This implements the functionality of 'readAndHold', but only on the single SRAM we're reading // from. val dataOutput = dataOutputVec(beatCounter) holdUnless RegNext(dataMemReadEnable) // Mark 'data' burst entries as no-longer-present as they get read out of the SRAM. when(dataMemReadEnable) { dataIsPresentClr := UIntToOH(unwindReadAddress, params.numEntries) } // As noted above, when starting the unwind FSM, we know the 'data' SRAM's output isn't valid, so it's safe to issue // a read command. Otherwise, only issue an SRAM read when the next 'unwindState' is 'sUnwinding', and if we know // we're not going to overwrite the SRAM's current output (the SRAM output is already valid, and it's not going to be // consumed by the output channel). val dontReadFromDataMem = unwindDataIsValid && !ioDataOut.ready dataMemReadEnable := startUnwind || (busyUnwinding && !stopUnwind && !dontReadFromDataMem) // While unwinding, prevent new reservations from overwriting the current 'map' entry that we're using. We need // 'responseListIndex' to be coherent for the entire unwind process. val rawResponseListIndex = map.read(responseIndex) val unwindResponseListIndex = RegEnable(rawResponseListIndex, startNewBurst) responseListIndex := Mux(busyUnwinding, unwindResponseListIndex, rawResponseListIndex) // Accept responses either when they can be passed through to the output channel, or if they're out-of-order and are // just going to be stashed in the 'data' SRAM. Never accept a response payload when we're busy unwinding, since that // could result in reading from and writing to the 'data' SRAM in the same cycle, and we want that SRAM to be // single-ported. ioResponse.ready := (ioDataOut.ready || !isResponseInOrder) && !busyUnwinding // Either pass an in-order response to the output channel, or data read from the unwind FSM. ioDataOut.valid := Mux(busyUnwinding, unwindDataIsValid, ioResponse.valid && isResponseInOrder) ioDataOut.bits.listIndex := responseListIndex ioDataOut.bits.payload := Mux(busyUnwinding, dataOutput, ioResponse.bits.data) // It's an error to get a response that isn't associated with a valid linked list. when(ioResponse.fire || unwindDataIsValid) { assert( valid(responseListIndex), "No linked list exists at index %d, mapped from %d", responseListIndex, responseIndex ) } when(busyUnwinding && dataMemReadEnable) { assert(isResponseInOrder, "Unwind FSM must read entries from SRAM in order") } } /** Specialized version of [[ReservableListBuffer]] for the case of numEntries == 1. 
* * Much of the complex logic in [[ReservableListBuffer]] can disappear in this case. For instance, we don't have to * reorder any responses, or store any linked lists. */ class PassthroughListBuffer[T <: Data](gen: T, params: ReservableListBufferParameters) extends BaseReservableListBuffer(gen, params) { require(params.numEntries == 1, s"PassthroughListBuffer is only valid when 'numEntries' (${params.numEntries}) is 1") val used = RegInit(0.U(params.numEntries.W)) val map = Mem(params.numEntries, UInt(params.listBits.W)) val usedSet = WireDefault(0.U(params.numEntries.W)) val usedClr = WireDefault(0.U(params.numEntries.W)) used := (used & ~usedClr) | usedSet ioReserve.ready := used === 0.U // Store which list index was reserved, we need to return this value when we get a response. when(ioReserve.fire) { usedSet := 1.U map.write(0.U, ioReserve.bits) } // There's only one valid linked list entry, which is at index 0. ioReservedIndex := 0.U val isLastResponseBeat = ioResponse.bits.count === ioResponse.bits.numBeats1 // Mark the linked list as empty when we get the last beat in a response. // Note that 'ioResponse.fire === ioDataOut.fire'. when(ioResponse.fire && isLastResponseBeat) { usedClr := 1.U } // Always pass the response data straight through, since we never need to reorder the response data. ioDataOut.bits.listIndex := map.read(0.U) ioDataOut.bits.payload := ioResponse.bits.data ioDataOut.valid := ioResponse.valid ioResponse.ready := ioDataOut.ready }
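The free-entry selection used above by ReservableListBuffer (freeOH := ~(leftOR(~used) << 1) & ~used) and by the write-ID allocator (freeWriteIdOHRaw) isolates the least-significant cleared bit of 'used', i.e. the lowest-indexed free entry, as a one-hot value. For intuition, a plain-Scala model of that bit trick follows; it assumes leftOR(x) ORs each bit of x with all bits below it (as in freechips.rocketchip.util) and is not the hardware implementation:

object FreeOHModel {
  // Software model of leftOR: result bit i is the OR of input bits 0..i.
  def leftOR(x: Int, width: Int): Int = {
    var acc = 0
    var res = 0
    for (i <- 0 until width) {
      acc |= (x >> i) & 1
      res |= acc << i
    }
    res
  }
  // Model of: freeOH := ~(leftOR(~used) << 1) & ~used
  def freeOH(used: Int, width: Int): Int = {
    val mask = (1 << width) - 1
    val free = ~used & mask          // set bits mark free entries
    ~(leftOR(free, width) << 1) & free & mask
  }
  def main(args: Array[String]): Unit = {
    assert(freeOH(0x5, 4) == 0x2) // used = b0101: lowest free entry is 1 -> b0010
    assert(freeOH(0x0, 4) == 0x1) // empty buffer: entry 0 is chosen
    assert(freeOH(0xF, 4) == 0x0) // full buffer: no free entry, result is all zeroes
  }
}

Because the expression returns all zeroes when every entry is in use, readiness is gated separately in the source above (!used.andR for ioReserve.ready, !usedWriteIds.andR for canIssueW), so a reservation is never granted against an all-zero one-hot.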
module dataMems_222( // @[UnsafeAXI4ToTL.scala:365:62] input [4:0] R0_addr, input R0_en, input R0_clk, output [66:0] R0_data, input [4:0] W0_addr, input W0_en, input W0_clk, input [66:0] W0_data ); dataMems_0_ext dataMems_0_ext ( // @[UnsafeAXI4ToTL.scala:365:62] .R0_addr (R0_addr), .R0_en (R0_en), .R0_clk (R0_clk), .R0_data (R0_data), .W0_addr (W0_addr), .W0_en (W0_en), .W0_clk (W0_clk), .W0_data (W0_data) ); // @[UnsafeAXI4ToTL.scala:365:62] endmodule
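The dataMems_222 module above is one element of the 'dataMems' array declared at UnsafeAXI4ToTL.scala:365 (Seq.fill(params.numBeats) { SyncReadMem(params.numEntries, gen) }). Its 5-bit address and 67-bit data ports are consistent with 32 reorder entries and a ReorderData payload of 64 data bits, 2 resp bits, and a last bit, with no user fields; the inner dataMems_0_ext instance is the extracted memory wrapper emitted for the SyncReadMem. A minimal standalone sketch of a SyncReadMem with the same shape (illustrative only, not the generated module's source):

import chisel3._

class OneDataMem extends Module {
  val io = IO(new Bundle {
    val raddr = Input(UInt(5.W))
    val ren   = Input(Bool())
    val rdata = Output(UInt(67.W))
    val waddr = Input(UInt(5.W))
    val wen   = Input(Bool())
    val wdata = Input(UInt(67.W))
  })
  // 32-entry, 67-bit memory with a synchronous (1-cycle) read port and one write port.
  val mem = SyncReadMem(32, UInt(67.W))
  // The enable-gated read is what becomes the R0_addr/R0_en/R0_clk/R0_data port group
  // on the extracted memory; the write becomes the W0_* group.
  io.rdata := mem.read(io.raddr, io.ren)
  when(io.wen) { mem.write(io.waddr, io.wdata) }
}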
Generate the Verilog code corresponding to the following Chisel files. File Crossing.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.interrupts import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.util.{SynchronizerShiftReg, AsyncResetReg} @deprecated("IntXing does not ensure interrupt source is glitch free. Use IntSyncSource and IntSyncSink", "rocket-chip 1.2") class IntXing(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val intnode = IntAdapterNode() lazy val module = new Impl class Impl extends LazyModuleImp(this) { (intnode.in zip intnode.out) foreach { case ((in, _), (out, _)) => out := SynchronizerShiftReg(in, sync) } } } object IntSyncCrossingSource { def apply(alreadyRegistered: Boolean = false)(implicit p: Parameters) = { val intsource = LazyModule(new IntSyncCrossingSource(alreadyRegistered)) intsource.node } } class IntSyncCrossingSource(alreadyRegistered: Boolean = false)(implicit p: Parameters) extends LazyModule { val node = IntSyncSourceNode(alreadyRegistered) lazy val module = if (alreadyRegistered) (new ImplRegistered) else (new Impl) class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := AsyncResetReg(Cat(in.reverse)).asBools } } class ImplRegistered extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.sync.size).getOrElse(0) override def desiredName = s"IntSyncCrossingSource_n${node.out.size}x${outSize}_Registered" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out.sync := in } } } object IntSyncCrossingSink { @deprecated("IntSyncCrossingSink which used the `sync` parameter to determine crossing type is deprecated. 
Use IntSyncAsyncCrossingSink, IntSyncRationalCrossingSink, or IntSyncSyncCrossingSink instead for > 1, 1, and 0 sync values respectively", "rocket-chip 1.2") def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncAsyncCrossingSink(sync: Int = 3)(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(sync) lazy val module = new Impl class Impl extends LazyModuleImp(this) { override def desiredName = s"IntSyncAsyncCrossingSink_n${node.out.size}x${node.out.head._1.size}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := SynchronizerShiftReg(in.sync, sync) } } } object IntSyncAsyncCrossingSink { def apply(sync: Int = 3)(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncAsyncCrossingSink(sync)) intsink.node } } class IntSyncSyncCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncSyncCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := in.sync } } } object IntSyncSyncCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncSyncCrossingSink()) intsink.node } } class IntSyncRationalCrossingSink()(implicit p: Parameters) extends LazyModule { val node = IntSyncSinkNode(1) lazy val module = new Impl class Impl extends LazyModuleImp(this) { def outSize = node.out.headOption.map(_._1.size).getOrElse(0) override def desiredName = s"IntSyncRationalCrossingSink_n${node.out.size}x${outSize}" (node.in zip node.out) foreach { case ((in, edgeIn), (out, edgeOut)) => out := RegNext(in.sync) } } } object IntSyncRationalCrossingSink { def apply()(implicit p: Parameters) = { val intsink = LazyModule(new IntSyncRationalCrossingSink()) intsink.node } } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. 
*/ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. 
* * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. */ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File MixedNode.scala: package org.chipsalliance.diplomacy.nodes import chisel3.{Data, DontCare, Wire} import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.{Field, Parameters} import org.chipsalliance.diplomacy.ValName import org.chipsalliance.diplomacy.sourceLine /** One side metadata of a [[Dangle]]. * * Describes one side of an edge going into or out of a [[BaseNode]]. * * @param serial * the global [[BaseNode.serial]] number of the [[BaseNode]] that this [[HalfEdge]] connects to. * @param index * the `index` in the [[BaseNode]]'s input or output port list that this [[HalfEdge]] belongs to. */ case class HalfEdge(serial: Int, index: Int) extends Ordered[HalfEdge] { import scala.math.Ordered.orderingToOrdered def compare(that: HalfEdge): Int = HalfEdge.unapply(this).compare(HalfEdge.unapply(that)) } /** [[Dangle]] captures the `IO` information of a [[LazyModule]] and which two [[BaseNode]]s the [[Edges]]/[[Bundle]] * connects. * * [[Dangle]]s are generated by [[BaseNode.instantiate]] using [[MixedNode.danglesOut]] and [[MixedNode.danglesIn]] , * [[LazyModuleImp.instantiate]] connects those that go to internal or explicit IO connections in a [[LazyModule]]. * * @param source * the source [[HalfEdge]] of this [[Dangle]], which captures the source [[BaseNode]] and the port `index` within * that [[BaseNode]]. * @param sink * sink [[HalfEdge]] of this [[Dangle]], which captures the sink [[BaseNode]] and the port `index` within that * [[BaseNode]]. * @param flipped * flip or not in [[AutoBundle.makeElements]]. If true this corresponds to `danglesOut`, if false it corresponds to * `danglesIn`. * @param dataOpt * actual [[Data]] for the hardware connection. Can be empty if this belongs to a cloned module */ case class Dangle(source: HalfEdge, sink: HalfEdge, flipped: Boolean, name: String, dataOpt: Option[Data]) { def data = dataOpt.get } /** [[Edges]] is a collection of parameters describing the functionality and connection for an interface, which is often * derived from the interconnection protocol and can inform the parameterization of the hardware bundles that actually * implement the protocol. 
*/ case class Edges[EI, EO](in: Seq[EI], out: Seq[EO]) /** A field available in [[Parameters]] used to determine whether [[InwardNodeImp.monitor]] will be called. */ case object MonitorsEnabled extends Field[Boolean](true) /** When rendering the edge in a graphical format, flip the order in which the edges' source and sink are presented. * * For example, when rendering graphML, yEd by default tries to put the source node vertically above the sink node, but * [[RenderFlipped]] inverts this relationship. When a particular [[LazyModule]] contains both source nodes and sink * nodes, flipping the rendering of one node's edge will usual produce a more concise visual layout for the * [[LazyModule]]. */ case object RenderFlipped extends Field[Boolean](false) /** The sealed node class in the package, all node are derived from it. * * @param inner * Sink interface implementation. * @param outer * Source interface implementation. * @param valName * val name of this node. * @tparam DI * Downward-flowing parameters received on the inner side of the node. It is usually a brunch of parameters * describing the protocol parameters from a source. For an [[InwardNode]], it is determined by the connected * [[OutwardNode]]. Since it can be connected to multiple sources, this parameter is always a Seq of source port * parameters. * @tparam UI * Upward-flowing parameters generated by the inner side of the node. It is usually a brunch of parameters describing * the protocol parameters of a sink. For an [[InwardNode]], it is determined itself. * @tparam EI * Edge Parameters describing a connection on the inner side of the node. It is usually a brunch of transfers * specified for a sink according to protocol. * @tparam BI * Bundle type used when connecting to the inner side of the node. It is a hardware interface of this sink interface. * It should extends from [[chisel3.Data]], which represents the real hardware. * @tparam DO * Downward-flowing parameters generated on the outer side of the node. It is usually a brunch of parameters * describing the protocol parameters of a source. For an [[OutwardNode]], it is determined itself. * @tparam UO * Upward-flowing parameters received by the outer side of the node. It is usually a brunch of parameters describing * the protocol parameters from a sink. For an [[OutwardNode]], it is determined by the connected [[InwardNode]]. * Since it can be connected to multiple sinks, this parameter is always a Seq of sink port parameters. * @tparam EO * Edge Parameters describing a connection on the outer side of the node. It is usually a brunch of transfers * specified for a source according to protocol. * @tparam BO * Bundle type used when connecting to the outer side of the node. It is a hardware interface of this source * interface. It should extends from [[chisel3.Data]], which represents the real hardware. 
* * @note * Call Graph of [[MixedNode]] * - line `─`: source is process by a function and generate pass to others * - Arrow `→`: target of arrow is generated by source * * {{{ * (from the other node) * ┌─────────────────────────────────────────────────────────[[InwardNode.uiParams]]─────────────┐ * ↓ │ * (binding node when elaboration) [[OutwardNode.uoParams]]────────────────────────[[MixedNode.mapParamsU]]→──────────┐ │ * [[InwardNode.accPI]] │ │ │ * │ │ (based on protocol) │ * │ │ [[MixedNode.inner.edgeI]] │ * │ │ ↓ │ * ↓ │ │ │ * (immobilize after elaboration) (inward port from [[OutwardNode]]) │ ↓ │ * [[InwardNode.iBindings]]──┐ [[MixedNode.iDirectPorts]]────────────────────→[[MixedNode.iPorts]] [[InwardNode.uiParams]] │ * │ │ ↑ │ │ │ * │ │ │ [[OutwardNode.doParams]] │ │ * │ │ │ (from the other node) │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * │ │ │ └────────┬──────────────┤ │ * │ │ │ │ │ │ * │ │ │ │ (based on protocol) │ * │ │ │ │ [[MixedNode.inner.edgeI]] │ * │ │ │ │ │ │ * │ │ (from the other node) │ ↓ │ * │ └───[[OutwardNode.oPortMapping]] [[OutwardNode.oStar]] │ [[MixedNode.edgesIn]]───┐ │ * │ ↑ ↑ │ │ ↓ │ * │ │ │ │ │ [[MixedNode.in]] │ * │ │ │ │ ↓ ↑ │ * │ (solve star connection) │ │ │ [[MixedNode.bundleIn]]──┘ │ * ├───[[MixedNode.resolveStar]]→─┼─────────────────────────────┤ └────────────────────────────────────┐ │ * │ │ │ [[MixedNode.bundleOut]]─┐ │ │ * │ │ │ ↑ ↓ │ │ * │ │ │ │ [[MixedNode.out]] │ │ * │ ↓ ↓ │ ↑ │ │ * │ ┌─────[[InwardNode.iPortMapping]] [[InwardNode.iStar]] [[MixedNode.edgesOut]]──┘ │ │ * │ │ (from the other node) ↑ │ │ * │ │ │ │ │ │ * │ │ │ [[MixedNode.outer.edgeO]] │ │ * │ │ │ (based on protocol) │ │ * │ │ │ │ │ │ * │ │ │ ┌────────────────────────────────────────┤ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * │ │ │ │ │ │ │ * (immobilize after elaboration)│ ↓ │ │ │ │ * [[OutwardNode.oBindings]]─┘ [[MixedNode.oDirectPorts]]───→[[MixedNode.oPorts]] [[OutwardNode.doParams]] │ │ * ↑ (inward port from [[OutwardNode]]) │ │ │ │ * │ ┌─────────────────────────────────────────┤ │ │ │ * │ │ │ │ │ │ * │ │ │ │ │ │ * [[OutwardNode.accPO]] │ ↓ │ │ │ * (binding node when elaboration) │ [[InwardNode.diParams]]─────→[[MixedNode.mapParamsD]]────────────────────────────┘ │ │ * │ ↑ │ │ * │ └──────────────────────────────────────────────────────────────────────────────────────────┘ │ * └──────────────────────────────────────────────────────────────────────────────────────────────────────────┘ * }}} */ abstract class MixedNode[DI, UI, EI, BI <: Data, DO, UO, EO, BO <: Data]( val inner: InwardNodeImp[DI, UI, EI, BI], val outer: OutwardNodeImp[DO, UO, EO, BO] )( implicit valName: ValName) extends BaseNode with NodeHandle[DI, UI, EI, BI, DO, UO, EO, BO] with InwardNode[DI, UI, BI] with OutwardNode[DO, UO, BO] { // Generate a [[NodeHandle]] with inward and outward node are both this node. val inward = this val outward = this /** Debug info of nodes binding. */ def bindingInfo: String = s"""$iBindingInfo |$oBindingInfo |""".stripMargin /** Debug info of ports connecting. */ def connectedPortsInfo: String = s"""${oPorts.size} outward ports connected: [${oPorts.map(_._2.name).mkString(",")}] |${iPorts.size} inward ports connected: [${iPorts.map(_._2.name).mkString(",")}] |""".stripMargin /** Debug info of parameters propagations. 
*/ def parametersInfo: String = s"""${doParams.size} downstream outward parameters: [${doParams.mkString(",")}] |${uoParams.size} upstream outward parameters: [${uoParams.mkString(",")}] |${diParams.size} downstream inward parameters: [${diParams.mkString(",")}] |${uiParams.size} upstream inward parameters: [${uiParams.mkString(",")}] |""".stripMargin /** For a given node, converts [[OutwardNode.accPO]] and [[InwardNode.accPI]] to [[MixedNode.oPortMapping]] and * [[MixedNode.iPortMapping]]. * * Given counts of known inward and outward binding and inward and outward star bindings, return the resolved inward * stars and outward stars. * * This method will also validate the arguments and throw a runtime error if the values are unsuitable for this type * of node. * * @param iKnown * Number of known-size ([[BIND_ONCE]]) input bindings. * @param oKnown * Number of known-size ([[BIND_ONCE]]) output bindings. * @param iStar * Number of unknown size ([[BIND_STAR]]) input bindings. * @param oStar * Number of unknown size ([[BIND_STAR]]) output bindings. * @return * A Tuple of the resolved number of input and output connections. */ protected[diplomacy] def resolveStar(iKnown: Int, oKnown: Int, iStar: Int, oStar: Int): (Int, Int) /** Function to generate downward-flowing outward params from the downward-flowing input params and the current output * ports. * * @param n * The size of the output sequence to generate. * @param p * Sequence of downward-flowing input parameters of this node. * @return * A `n`-sized sequence of downward-flowing output edge parameters. */ protected[diplomacy] def mapParamsD(n: Int, p: Seq[DI]): Seq[DO] /** Function to generate upward-flowing input parameters from the upward-flowing output parameters [[uiParams]]. * * @param n * Size of the output sequence. * @param p * Upward-flowing output edge parameters. * @return * A n-sized sequence of upward-flowing input edge parameters. */ protected[diplomacy] def mapParamsU(n: Int, p: Seq[UO]): Seq[UI] /** @return * The sink cardinality of the node, the number of outputs bound with [[BIND_QUERY]] summed with inputs bound with * [[BIND_STAR]]. */ protected[diplomacy] lazy val sinkCard: Int = oBindings.count(_._3 == BIND_QUERY) + iBindings.count(_._3 == BIND_STAR) /** @return * The source cardinality of this node, the number of inputs bound with [[BIND_QUERY]] summed with the number of * output bindings bound with [[BIND_STAR]]. */ protected[diplomacy] lazy val sourceCard: Int = iBindings.count(_._3 == BIND_QUERY) + oBindings.count(_._3 == BIND_STAR) /** @return list of nodes involved in flex bindings with this node. */ protected[diplomacy] lazy val flexes: Seq[BaseNode] = oBindings.filter(_._3 == BIND_FLEX).map(_._2) ++ iBindings.filter(_._3 == BIND_FLEX).map(_._2) /** Resolves the flex to be either source or sink and returns the offset where the [[BIND_STAR]] operators begin * greedily taking up the remaining connections. * * @return * A value >= 0 if it is sink cardinality, a negative value for source cardinality. The magnitude of the return * value is not relevant. */ protected[diplomacy] lazy val flexOffset: Int = { /** Recursively performs a depth-first search of the [[flexes]], [[BaseNode]]s connected to this node with flex * operators. The algorithm bottoms out when we either get to a node we have already visited or when we get to a * connection that is not a flex and can set the direction for us. 
Otherwise, recurse by visiting the `flexes` of * each node in the current set and decide whether they should be added to the set or not. * * @return * the mapping of [[BaseNode]] indexed by their serial numbers. */ def DFS(v: BaseNode, visited: Map[Int, BaseNode]): Map[Int, BaseNode] = { if (visited.contains(v.serial) || !v.flexibleArityDirection) { visited } else { v.flexes.foldLeft(visited + (v.serial -> v))((sum, n) => DFS(n, sum)) } } /** Determine which [[BaseNode]] are involved in resolving the flex connections to/from this node. * * @example * {{{ * a :*=* b :*=* c * d :*=* b * e :*=* f * }}} * * `flexSet` for `a`, `b`, `c`, or `d` will be `Set(a, b, c, d)` `flexSet` for `e` or `f` will be `Set(e,f)` */ val flexSet = DFS(this, Map()).values /** The total number of :*= operators where we're on the left. */ val allSink = flexSet.map(_.sinkCard).sum /** The total number of :=* operators used when we're on the right. */ val allSource = flexSet.map(_.sourceCard).sum require( allSink == 0 || allSource == 0, s"The nodes ${flexSet.map(_.name)} which are inter-connected by :*=* have ${allSink} :*= operators and ${allSource} :=* operators connected to them, making it impossible to determine cardinality inference direction." ) allSink - allSource } /** @return A value >= 0 if it is sink cardinality, a negative value for source cardinality. */ protected[diplomacy] def edgeArityDirection(n: BaseNode): Int = { if (flexibleArityDirection) flexOffset else if (n.flexibleArityDirection) n.flexOffset else 0 } /** For a node which is connected between two nodes, select the one that will influence the direction of the flex * resolution. */ protected[diplomacy] def edgeAritySelect(n: BaseNode, l: => Int, r: => Int): Int = { val dir = edgeArityDirection(n) if (dir < 0) l else if (dir > 0) r else 1 } /** Ensure that the same node is not visited twice in resolving `:*=`, etc operators. */ private var starCycleGuard = false /** Resolve all the star operators into concrete indicies. As connections are being made, some may be "star" * connections which need to be resolved. In some way to determine how many actual edges they correspond to. We also * need to build up the ranges of edges which correspond to each binding operator, so that We can apply the correct * edge parameters and later build up correct bundle connections. * * [[oPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that oPort (binding * operator). [[iPortMapping]]: `Seq[(Int, Int)]` where each item is the range of edges corresponding to that iPort * (binding operator). [[oStar]]: `Int` the value to return for this node `N` for any `N :*= foo` or `N :*=* foo :*= * bar` [[iStar]]: `Int` the value to return for this node `N` for any `foo :=* N` or `bar :=* foo :*=* N` */ protected[diplomacy] lazy val ( oPortMapping: Seq[(Int, Int)], iPortMapping: Seq[(Int, Int)], oStar: Int, iStar: Int ) = { try { if (starCycleGuard) throw StarCycleException() starCycleGuard = true // For a given node N... 
// Number of foo :=* N // + Number of bar :=* foo :*=* N val oStars = oBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) < 0) } // Number of N :*= foo // + Number of N :*=* foo :*= bar val iStars = iBindings.count { case (_, n, b, _, _) => b == BIND_STAR || (b == BIND_FLEX && edgeArityDirection(n) > 0) } // 1 for foo := N // + bar.iStar for bar :*= foo :*=* N // + foo.iStar for foo :*= N // + 0 for foo :=* N val oKnown = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, 0, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => 0 } }.sum // 1 for N := foo // + bar.oStar for N :*=* foo :=* bar // + foo.oStar for N :=* foo // + 0 for N :*= foo val iKnown = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, 0) case BIND_QUERY => n.oStar case BIND_STAR => 0 } }.sum // Resolve star depends on the node subclass to implement the algorithm for this. val (iStar, oStar) = resolveStar(iKnown, oKnown, iStars, oStars) // Cumulative list of resolved outward binding range starting points val oSum = oBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, oStar, n.iStar) case BIND_QUERY => n.iStar case BIND_STAR => oStar } }.scanLeft(0)(_ + _) // Cumulative list of resolved inward binding range starting points val iSum = iBindings.map { case (_, n, b, _, _) => b match { case BIND_ONCE => 1 case BIND_FLEX => edgeAritySelect(n, n.oStar, iStar) case BIND_QUERY => n.oStar case BIND_STAR => iStar } }.scanLeft(0)(_ + _) // Create ranges for each binding based on the running sums and return // those along with resolved values for the star operations. (oSum.init.zip(oSum.tail), iSum.init.zip(iSum.tail), oStar, iStar) } catch { case c: StarCycleException => throw c.copy(loop = context +: c.loop) } } /** Sequence of inward ports. * * This should be called after all star bindings are resolved. * * Each element is: `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. * `n` Instance of inward node. `p` View of [[Parameters]] where this connection was made. `s` Source info where this * connection was made in the source code. */ protected[diplomacy] lazy val oDirectPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oBindings.flatMap { case (i, n, _, p, s) => // for each binding operator in this node, look at what it connects to val (start, end) = n.iPortMapping(i) (start until end).map { j => (j, n, p, s) } } /** Sequence of outward ports. * * This should be called after all star bindings are resolved. * * `j` Port index of this binding in the Node's [[oPortMapping]] on the other side of the binding. `n` Instance of * outward node. `p` View of [[Parameters]] where this connection was made. `s` [[SourceInfo]] where this connection * was made in the source code. */ protected[diplomacy] lazy val iDirectPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iBindings.flatMap { case (i, n, _, p, s) => // query this port index range of this node in the other side of node. 
val (start, end) = n.oPortMapping(i) (start until end).map { j => (j, n, p, s) } } // Ephemeral nodes ( which have non-None iForward/oForward) have in_degree = out_degree // Thus, there must exist an Eulerian path and the below algorithms terminate @scala.annotation.tailrec private def oTrace( tuple: (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) ): (Int, InwardNode[DO, UO, BO], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.iForward(i) match { case None => (i, n, p, s) case Some((j, m)) => oTrace((j, m, p, s)) } } @scala.annotation.tailrec private def iTrace( tuple: (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) ): (Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo) = tuple match { case (i, n, p, s) => n.oForward(i) match { case None => (i, n, p, s) case Some((j, m)) => iTrace((j, m, p, s)) } } /** Final output ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - Numeric index of this binding in the [[InwardNode]] on the other end. * - [[InwardNode]] on the other end of this binding. * - A view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val oPorts: Seq[(Int, InwardNode[DO, UO, BO], Parameters, SourceInfo)] = oDirectPorts.map(oTrace) /** Final input ports after all stars and port forwarding (e.g. [[EphemeralNode]]s) have been resolved. * * Each Port is a tuple of: * - numeric index of this binding in [[OutwardNode]] on the other end. * - [[OutwardNode]] on the other end of this binding. * - a view of [[Parameters]] where the binding occurred. * - [[SourceInfo]] for source-level error reporting. */ lazy val iPorts: Seq[(Int, OutwardNode[DI, UI, BI], Parameters, SourceInfo)] = iDirectPorts.map(iTrace) private var oParamsCycleGuard = false protected[diplomacy] lazy val diParams: Seq[DI] = iPorts.map { case (i, n, _, _) => n.doParams(i) } protected[diplomacy] lazy val doParams: Seq[DO] = { try { if (oParamsCycleGuard) throw DownwardCycleException() oParamsCycleGuard = true val o = mapParamsD(oPorts.size, diParams) require( o.size == oPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of outward ports should equal the number of produced outward parameters. |$context |$connectedPortsInfo |Downstreamed inward parameters: [${diParams.mkString(",")}] |Produced outward parameters: [${o.mkString(",")}] |""".stripMargin ) o.map(outer.mixO(_, this)) } catch { case c: DownwardCycleException => throw c.copy(loop = context +: c.loop) } } private var iParamsCycleGuard = false protected[diplomacy] lazy val uoParams: Seq[UO] = oPorts.map { case (o, n, _, _) => n.uiParams(o) } protected[diplomacy] lazy val uiParams: Seq[UI] = { try { if (iParamsCycleGuard) throw UpwardCycleException() iParamsCycleGuard = true val i = mapParamsU(iPorts.size, uoParams) require( i.size == iPorts.size, s"""Diplomacy has detected a problem with your graph: |At the following node, the number of inward ports should equal the number of produced inward parameters. |$context |$connectedPortsInfo |Upstreamed outward parameters: [${uoParams.mkString(",")}] |Produced inward parameters: [${i.mkString(",")}] |""".stripMargin ) i.map(inner.mixI(_, this)) } catch { case c: UpwardCycleException => throw c.copy(loop = context +: c.loop) } } /** Outward edge parameters. 
*/ protected[diplomacy] lazy val edgesOut: Seq[EO] = (oPorts.zip(doParams)).map { case ((i, n, p, s), o) => outer.edgeO(o, n.uiParams(i), p, s) } /** Inward edge parameters. */ protected[diplomacy] lazy val edgesIn: Seq[EI] = (iPorts.zip(uiParams)).map { case ((o, n, p, s), i) => inner.edgeI(n.doParams(o), i, p, s) } /** A tuple of the input edge parameters and output edge parameters for the edges bound to this node. * * If you need to access to the edges of a foreign Node, use this method (in/out create bundles). */ lazy val edges: Edges[EI, EO] = Edges(edgesIn, edgesOut) /** Create actual Wires corresponding to the Bundles parameterized by the outward edges of this node. */ protected[diplomacy] lazy val bundleOut: Seq[BO] = edgesOut.map { e => val x = Wire(outer.bundleO(e)).suggestName(s"${valName.value}Out") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } /** Create actual Wires corresponding to the Bundles parameterized by the inward edges of this node. */ protected[diplomacy] lazy val bundleIn: Seq[BI] = edgesIn.map { e => val x = Wire(inner.bundleI(e)).suggestName(s"${valName.value}In") // TODO: Don't care unconnected forwarded diplomatic signals for compatibility issue, // In the future, we should add an option to decide whether allowing unconnected in the LazyModule x := DontCare x } private def emptyDanglesOut: Seq[Dangle] = oPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(serial, i), sink = HalfEdge(n.serial, j), flipped = false, name = wirePrefix + "out", dataOpt = None ) } private def emptyDanglesIn: Seq[Dangle] = iPorts.zipWithIndex.map { case ((j, n, _, _), i) => Dangle( source = HalfEdge(n.serial, j), sink = HalfEdge(serial, i), flipped = true, name = wirePrefix + "in", dataOpt = None ) } /** Create the [[Dangle]]s which describe the connections from this node output to other nodes inputs. */ protected[diplomacy] def danglesOut: Seq[Dangle] = emptyDanglesOut.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleOut(i))) } /** Create the [[Dangle]]s which describe the connections from this node input from other nodes outputs. */ protected[diplomacy] def danglesIn: Seq[Dangle] = emptyDanglesIn.zipWithIndex.map { case (d, i) => d.copy(dataOpt = Some(bundleIn(i))) } private[diplomacy] var instantiated = false /** Gather Bundle and edge parameters of outward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def out: Seq[(BO, EO)] = { require( instantiated, s"$name.out should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleOut.zip(edgesOut) } /** Gather Bundle and edge parameters of inward ports. * * Accessors to the result of negotiation to be used within [[LazyModuleImp]] Code. Should only be used within * [[LazyModuleImp]] code or after its instantiation has completed. */ def in: Seq[(BI, EI)] = { require( instantiated, s"$name.in should not be called until after instantiation of its parent LazyModule.module has begun" ) bundleIn.zip(edgesIn) } /** Actually instantiate this node during [[LazyModuleImp]] evaluation. Mark that it's safe to use the Bundle wires, * instantiate monitors on all input ports if appropriate, and return all the dangles of this node. 
*/ protected[diplomacy] def instantiate(): Seq[Dangle] = { instantiated = true if (!circuitIdentity) { (iPorts.zip(in)).foreach { case ((_, _, p, _), (b, e)) => if (p(MonitorsEnabled)) inner.monitor(b, e) } } danglesOut ++ danglesIn } protected[diplomacy] def cloneDangles(): Seq[Dangle] = emptyDanglesOut ++ emptyDanglesIn /** Connects the outward part of a node with the inward part of this node. */ protected[diplomacy] def bind( h: OutwardNode[DI, UI, BI], binding: NodeBinding )( implicit p: Parameters, sourceInfo: SourceInfo ): Unit = { val x = this // x := y val y = h sourceLine(sourceInfo, " at ", "") val i = x.iPushed val o = y.oPushed y.oPush( i, x, binding match { case BIND_ONCE => BIND_ONCE case BIND_FLEX => BIND_FLEX case BIND_STAR => BIND_QUERY case BIND_QUERY => BIND_STAR } ) x.iPush(o, y, binding) } /* Metadata for printing the node graph. */ def inputs: Seq[(OutwardNode[DI, UI, BI], RenderedEdge)] = (iPorts.zip(edgesIn)).map { case ((_, n, p, _), e) => val re = inner.render(e) (n, re.copy(flipped = re.flipped != p(RenderFlipped))) } /** Metadata for printing the node graph */ def outputs: Seq[(InwardNode[DO, UO, BO], RenderedEdge)] = oPorts.map { case (i, n, _, _) => (n, n.inputs(i)._2) } } File AsyncResetReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ /** This black-boxes an Async Reset * (or Set) * Register. * * Because Chisel doesn't support * parameterized black boxes, * we unfortunately have to * instantiate a number of these. * * We also have to hard-code the set/ * reset behavior. * * Do not confuse an asynchronous * reset signal with an asynchronously * reset reg. You should still * properly synchronize your reset * deassertion. * * @param d Data input * @param q Data Output * @param clk Clock Input * @param rst Reset Input * @param en Write Enable Input * */ class AsyncResetReg(resetValue: Int = 0) extends RawModule { val io = IO(new Bundle { val d = Input(Bool()) val q = Output(Bool()) val en = Input(Bool()) val clk = Input(Clock()) val rst = Input(Reset()) }) val reg = withClockAndReset(io.clk, io.rst.asAsyncReset)(RegInit(resetValue.U(1.W))) when (io.en) { reg := io.d } io.q := reg } class SimpleRegIO(val w: Int) extends Bundle{ val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) val en = Input(Bool()) } class AsyncResetRegVec(val w: Int, val init: BigInt) extends Module { override def desiredName = s"AsyncResetRegVec_w${w}_i${init}" val io = IO(new SimpleRegIO(w)) val reg = withReset(reset.asAsyncReset)(RegInit(init.U(w.W))) when (io.en) { reg := io.d } io.q := reg } object AsyncResetReg { // Create Single Registers def apply(d: Bool, clk: Clock, rst: Bool, init: Boolean, name: Option[String]): Bool = { val reg = Module(new AsyncResetReg(if (init) 1 else 0)) reg.io.d := d reg.io.clk := clk reg.io.rst := rst reg.io.en := true.B name.foreach(reg.suggestName(_)) reg.io.q } def apply(d: Bool, clk: Clock, rst: Bool): Bool = apply(d, clk, rst, false, None) def apply(d: Bool, clk: Clock, rst: Bool, name: String): Bool = apply(d, clk, rst, false, Some(name)) // Create Vectors of Registers def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: Option[String] = None): UInt = { val w = updateData.getWidth max resetData.bitLength val reg = Module(new AsyncResetRegVec(w, resetData)) name.foreach(reg.suggestName(_)) reg.io.d := updateData reg.io.en := enable reg.io.q } def apply(updateData: UInt, resetData: BigInt, enable: Bool, name: String): UInt = apply(updateData, resetData, enable, Some(name)) def 
apply(updateData: UInt, resetData: BigInt): UInt = apply(updateData, resetData, enable = true.B) def apply(updateData: UInt, resetData: BigInt, name: String): UInt = apply(updateData, resetData, enable = true.B, Some(name)) def apply(updateData: UInt, enable: Bool): UInt = apply(updateData, resetData=BigInt(0), enable) def apply(updateData: UInt, enable: Bool, name: String): UInt = apply(updateData, resetData = BigInt(0), enable, Some(name)) def apply(updateData: UInt): UInt = apply(updateData, resetData = BigInt(0), enable = true.B) def apply(updateData: UInt, name:String): UInt = apply(updateData, resetData = BigInt(0), enable = true.B, Some(name)) }
module IntSyncCrossingSource_n1x2_9( // @[Crossing.scala:41:9] input clock, // @[Crossing.scala:41:9] input reset, // @[Crossing.scala:41:9] input auto_in_0, // @[LazyModuleImp.scala:107:25] input auto_in_1, // @[LazyModuleImp.scala:107:25] output auto_out_sync_0, // @[LazyModuleImp.scala:107:25] output auto_out_sync_1 // @[LazyModuleImp.scala:107:25] ); wire [1:0] _reg_io_q; // @[AsyncResetReg.scala:86:21] wire auto_in_0_0 = auto_in_0; // @[Crossing.scala:41:9] wire auto_in_1_0 = auto_in_1; // @[Crossing.scala:41:9] wire nodeIn_0 = auto_in_0_0; // @[Crossing.scala:41:9] wire nodeIn_1 = auto_in_1_0; // @[Crossing.scala:41:9] wire nodeOut_sync_0; // @[MixedNode.scala:542:17] wire nodeOut_sync_1; // @[MixedNode.scala:542:17] wire auto_out_sync_0_0; // @[Crossing.scala:41:9] wire auto_out_sync_1_0; // @[Crossing.scala:41:9] assign auto_out_sync_0_0 = nodeOut_sync_0; // @[Crossing.scala:41:9] assign auto_out_sync_1_0 = nodeOut_sync_1; // @[Crossing.scala:41:9] assign nodeOut_sync_0 = _reg_io_q[0]; // @[AsyncResetReg.scala:86:21] assign nodeOut_sync_1 = _reg_io_q[1]; // @[AsyncResetReg.scala:86:21] AsyncResetRegVec_w2_i0_9 reg_0 ( // @[AsyncResetReg.scala:86:21] .clock (clock), .reset (reset), .io_d ({nodeIn_1, nodeIn_0}), // @[Crossing.scala:45:36] .io_q (_reg_io_q) ); // @[AsyncResetReg.scala:86:21] assign auto_out_sync_0 = auto_out_sync_0_0; // @[Crossing.scala:41:9] assign auto_out_sync_1 = auto_out_sync_1_0; // @[Crossing.scala:41:9] endmodule
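For reference, the correspondence between the Chisel in Crossing.scala and the module above fits in a few lines. This is a hypothetical stand-alone sketch (the module name and IO names are invented for illustration; the real module is generated diplomatically from an IntSyncSourceNode) of what IntSyncCrossingSource does for a single 2-wide interrupt edge: the incoming lines are concatenated, registered through the asynchronously reset AsyncResetRegVec, and fanned back out on the sync output.

import chisel3._
import chisel3.util.Cat
import freechips.rocketchip.util.AsyncResetReg

// Illustrative only: names are assumptions, not the generated module's interface.
class IntSyncSourceSketch extends Module {
  val io = IO(new Bundle {
    val in   = Input(Vec(2, Bool()))  // async interrupt lines (auto_in_0 / auto_in_1)
    val sync = Output(Vec(2, Bool())) // registered copies (auto_out_sync_0 / auto_out_sync_1)
  })
  // Cat(io.in.reverse) places in(0) at bit 0, matching {nodeIn_1, nodeIn_0} in the Verilog.
  // AsyncResetReg(updateData: UInt) instantiates an AsyncResetRegVec with enable tied high.
  val q = AsyncResetReg(Cat(io.in.reverse))
  io.sync := VecInit(q.asBools)
}

In the generated Verilog this appears as the single AsyncResetRegVec_w2_i0 instance whose io_d is {nodeIn_1, nodeIn_0} and whose two output bits drive auto_out_sync_0 and auto_out_sync_1.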
Generate the Verilog code corresponding to the following Chisel files. File primitives.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object lowMask { def apply(in: UInt, topBound: BigInt, bottomBound: BigInt): UInt = { require(topBound != bottomBound) val numInVals = BigInt(1)<<in.getWidth if (topBound < bottomBound) { lowMask(~in, numInVals - 1 - topBound, numInVals - 1 - bottomBound) } else if (numInVals > 64 /* Empirical */) { // For simulation performance, we should avoid generating // exteremely wide shifters, so we divide and conquer. // Empirically, this does not impact synthesis QoR. 
val mid = numInVals / 2 val msb = in(in.getWidth - 1) val lsbs = in(in.getWidth - 2, 0) if (mid < topBound) { if (mid <= bottomBound) { Mux(msb, lowMask(lsbs, topBound - mid, bottomBound - mid), 0.U ) } else { Mux(msb, lowMask(lsbs, topBound - mid, 0) ## ((BigInt(1)<<(mid - bottomBound).toInt) - 1).U, lowMask(lsbs, mid, bottomBound) ) } } else { ~Mux(msb, 0.U, ~lowMask(lsbs, topBound, bottomBound)) } } else { val shift = (BigInt(-1)<<numInVals.toInt).S>>in Reverse( shift( (numInVals - 1 - bottomBound).toInt, (numInVals - topBound).toInt ) ) } } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object countLeadingZeros { def apply(in: UInt): UInt = PriorityEncoder(in.asBools.reverse) } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy2 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 1)>>1 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 2 + 1, ix * 2).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 2).orR reducedVec.asUInt } } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- object orReduceBy4 { def apply(in: UInt): UInt = { val reducedWidth = (in.getWidth + 3)>>2 val reducedVec = Wire(Vec(reducedWidth, Bool())) for (ix <- 0 until reducedWidth - 1) { reducedVec(ix) := in(ix * 4 + 3, ix * 4).orR } reducedVec(reducedWidth - 1) := in(in.getWidth - 1, (reducedWidth - 1) * 4).orR reducedVec.asUInt } } File RoundAnyRawFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util.Fill import consts._ //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundAnyRawFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int, options: Int ) extends RawModule { override def desiredName = s"RoundAnyRawFNToRecFN_ie${inExpWidth}_is${inSigWidth}_oe${outExpWidth}_os${outSigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(inExpWidth, inSigWidth)) // (allowed exponent range has limits) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sigMSBitAlwaysZero = ((options & flRoundOpt_sigMSBitAlwaysZero) != 0) val effectiveInSigWidth = if (sigMSBitAlwaysZero) inSigWidth else inSigWidth + 1 val neverUnderflows = ((options & (flRoundOpt_neverUnderflows | flRoundOpt_subnormsAlwaysExact) ) != 0) || (inExpWidth < outExpWidth) val neverOverflows = ((options & flRoundOpt_neverOverflows) != 0) || (inExpWidth < outExpWidth) val outNaNExp = BigInt(7)<<(outExpWidth - 2) val outInfExp = BigInt(6)<<(outExpWidth - 2) val outMaxFiniteExp = outInfExp - 1 val outMinNormExp = (BigInt(1)<<(outExpWidth - 1)) + 2 val outMinNonzeroExp = outMinNormExp - outSigWidth + 1 //------------------------------------------------------------------------ //------------------------------------------------------------------------ val roundingMode_near_even = (io.roundingMode === round_near_even) val roundingMode_minMag = (io.roundingMode === round_minMag) val roundingMode_min = (io.roundingMode === round_min) val roundingMode_max = (io.roundingMode === round_max) val roundingMode_near_maxMag = (io.roundingMode === round_near_maxMag) val roundingMode_odd = (io.roundingMode === round_odd) val roundMagUp = (roundingMode_min && io.in.sign) || (roundingMode_max && ! 
io.in.sign) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val sAdjustedExp = if (inExpWidth < outExpWidth) (io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S )(outExpWidth, 0).zext else if (inExpWidth == outExpWidth) io.in.sExp else io.in.sExp +& ((BigInt(1)<<outExpWidth) - (BigInt(1)<<inExpWidth)).S val adjustedSig = if (inSigWidth <= outSigWidth + 2) io.in.sig<<(outSigWidth - inSigWidth + 2) else (io.in.sig(inSigWidth, inSigWidth - outSigWidth - 1) ## io.in.sig(inSigWidth - outSigWidth - 2, 0).orR ) val doShiftSigDown1 = if (sigMSBitAlwaysZero) false.B else adjustedSig(outSigWidth + 2) val common_expOut = Wire(UInt((outExpWidth + 1).W)) val common_fractOut = Wire(UInt((outSigWidth - 1).W)) val common_overflow = Wire(Bool()) val common_totalUnderflow = Wire(Bool()) val common_underflow = Wire(Bool()) val common_inexact = Wire(Bool()) if ( neverOverflows && neverUnderflows && (effectiveInSigWidth <= outSigWidth) ) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- common_expOut := sAdjustedExp(outExpWidth, 0) + doShiftSigDown1 common_fractOut := Mux(doShiftSigDown1, adjustedSig(outSigWidth + 1, 3), adjustedSig(outSigWidth, 2) ) common_overflow := false.B common_totalUnderflow := false.B common_underflow := false.B common_inexact := false.B } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundMask = if (neverUnderflows) 0.U(outSigWidth.W) ## doShiftSigDown1 ## 3.U(2.W) else (lowMask( sAdjustedExp(outExpWidth, 0), outMinNormExp - outSigWidth - 1, outMinNormExp ) | doShiftSigDown1) ## 3.U(2.W) val shiftedRoundMask = 0.U(1.W) ## roundMask>>1 val roundPosMask = ~shiftedRoundMask & roundMask val roundPosBit = (adjustedSig & roundPosMask).orR val anyRoundExtra = (adjustedSig & shiftedRoundMask).orR val anyRound = roundPosBit || anyRoundExtra val roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && roundPosBit) || (roundMagUp && anyRound) val roundedSig: Bits = Mux(roundIncr, (((adjustedSig | roundMask)>>2) +& 1.U) & ~Mux(roundingMode_near_even && roundPosBit && ! anyRoundExtra, roundMask>>1, 0.U((outSigWidth + 2).W) ), (adjustedSig & ~roundMask)>>2 | Mux(roundingMode_odd && anyRound, roundPosMask>>1, 0.U) ) //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? 
val sRoundedExp = sAdjustedExp +& (roundedSig>>outSigWidth).asUInt.zext common_expOut := sRoundedExp(outExpWidth, 0) common_fractOut := Mux(doShiftSigDown1, roundedSig(outSigWidth - 1, 1), roundedSig(outSigWidth - 2, 0) ) common_overflow := (if (neverOverflows) false.B else //*** REWRITE BASED ON BEFORE-ROUNDING EXPONENT?: (sRoundedExp>>(outExpWidth - 1) >= 3.S)) common_totalUnderflow := (if (neverUnderflows) false.B else //*** WOULD BE GOOD ENOUGH TO USE EXPONENT BEFORE ROUNDING?: (sRoundedExp < outMinNonzeroExp.S)) val unboundedRange_roundPosBit = Mux(doShiftSigDown1, adjustedSig(2), adjustedSig(1)) val unboundedRange_anyRound = (doShiftSigDown1 && adjustedSig(2)) || adjustedSig(1, 0).orR val unboundedRange_roundIncr = ((roundingMode_near_even || roundingMode_near_maxMag) && unboundedRange_roundPosBit) || (roundMagUp && unboundedRange_anyRound) val roundCarry = Mux(doShiftSigDown1, roundedSig(outSigWidth + 1), roundedSig(outSigWidth) ) common_underflow := (if (neverUnderflows) false.B else common_totalUnderflow || //*** IF SIG WIDTH IS VERY NARROW, NEED TO ACCOUNT FOR ROUND-EVEN ZEROING //*** M.S. BIT OF SUBNORMAL SIG? (anyRound && ((sAdjustedExp>>outExpWidth) <= 0.S) && Mux(doShiftSigDown1, roundMask(3), roundMask(2)) && ! ((io.detectTininess === tininess_afterRounding) && ! Mux(doShiftSigDown1, roundMask(4), roundMask(3) ) && roundCarry && roundPosBit && unboundedRange_roundIncr))) common_inexact := common_totalUnderflow || anyRound } //------------------------------------------------------------------------ //------------------------------------------------------------------------ val isNaNOut = io.invalidExc || io.in.isNaN val notNaN_isSpecialInfOut = io.infiniteExc || io.in.isInf val commonCase = ! isNaNOut && ! notNaN_isSpecialInfOut && ! io.in.isZero val overflow = commonCase && common_overflow val underflow = commonCase && common_underflow val inexact = overflow || (commonCase && common_inexact) val overflow_roundMagUp = roundingMode_near_even || roundingMode_near_maxMag || roundMagUp val pegMinNonzeroMagOut = commonCase && common_totalUnderflow && (roundMagUp || roundingMode_odd) val pegMaxFiniteMagOut = overflow && ! 
overflow_roundMagUp val notNaN_isInfOut = notNaN_isSpecialInfOut || (overflow && overflow_roundMagUp) val signOut = Mux(isNaNOut, false.B, io.in.sign) val expOut = (common_expOut & ~Mux(io.in.isZero || common_totalUnderflow, (BigInt(7)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMinNonzeroMagOut, ~outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) & ~Mux(pegMaxFiniteMagOut, (BigInt(1)<<(outExpWidth - 1)).U((outExpWidth + 1).W), 0.U ) & ~Mux(notNaN_isInfOut, (BigInt(1)<<(outExpWidth - 2)).U((outExpWidth + 1).W), 0.U )) | Mux(pegMinNonzeroMagOut, outMinNonzeroExp.U((outExpWidth + 1).W), 0.U ) | Mux(pegMaxFiniteMagOut, outMaxFiniteExp.U((outExpWidth + 1).W), 0.U ) | Mux(notNaN_isInfOut, outInfExp.U((outExpWidth + 1).W), 0.U) | Mux(isNaNOut, outNaNExp.U((outExpWidth + 1).W), 0.U) val fractOut = Mux(isNaNOut || io.in.isZero || common_totalUnderflow, Mux(isNaNOut, (BigInt(1)<<(outSigWidth - 2)).U, 0.U), common_fractOut ) | Fill(outSigWidth - 1, pegMaxFiniteMagOut) io.out := signOut ## expOut ## fractOut io.exceptionFlags := io.invalidExc ## io.infiniteExc ## overflow ## underflow ## inexact } //---------------------------------------------------------------------------- //---------------------------------------------------------------------------- class RoundRawFNToRecFN(expWidth: Int, sigWidth: Int, options: Int) extends RawModule { override def desiredName = s"RoundRawFNToRecFN_e${expWidth}_s${sigWidth}" val io = IO(new Bundle { val invalidExc = Input(Bool()) // overrides 'infiniteExc' and 'in' val infiniteExc = Input(Bool()) // overrides 'in' except for 'in.sign' val in = Input(new RawFloat(expWidth, sigWidth + 2)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((expWidth + sigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( expWidth, sigWidth + 2, expWidth, sigWidth, options)) roundAnyRawFNToRecFN.io.invalidExc := io.invalidExc roundAnyRawFNToRecFN.io.infiniteExc := io.infiniteExc roundAnyRawFNToRecFN.io.in := io.in roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags }
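// ----------------------------------------------------------------------------
// Aside (not part of the original sources): a minimal, hypothetical sketch of the
// round-to-nearest-even decision that RoundAnyRawFNToRecFN implements with
// roundPosBit / anyRoundExtra. The module below is generated with io_roundingMode
// fixed at round_near_even, so the increment fires when the round bit is set and,
// on an exact tie (no sticky bits), the low result bit is forced back to even.
// Plain BigInt arithmetic, for illustration only:
object RoundNearEvenSketch {
  /** Round `value / 2^shift` to the nearest integer, ties to even (requires shift >= 1). */
  def roundNearEven(value: BigInt, shift: Int): BigInt = {
    require(shift >= 1)
    val truncated = value >> shift                                  // bits kept in the result
    val roundBit  = value.testBit(shift - 1)                        // first discarded bit (roundPosBit)
    val sticky    = (value & ((BigInt(1) << (shift - 1)) - 1)) != 0 // lower discarded bits (anyRoundExtra)
    if (roundBit && (sticky || truncated.testBit(0))) truncated + 1 else truncated
  }
}
// Example: roundNearEven(5, 1) == 2  (2.5 is a tie and rounds to the even value 2),
//          roundNearEven(7, 1) == 4  (3.5 is a tie and rounds to the even value 4).
// ----------------------------------------------------------------------------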
module RoundAnyRawFNToRecFN_ie8_is26_oe8_os24_50( // @[RoundAnyRawFNToRecFN.scala:48:5] input io_invalidExc, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isNaN, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isInf, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_isZero, // @[RoundAnyRawFNToRecFN.scala:58:16] input io_in_sign, // @[RoundAnyRawFNToRecFN.scala:58:16] input [9:0] io_in_sExp, // @[RoundAnyRawFNToRecFN.scala:58:16] input [26:0] io_in_sig, // @[RoundAnyRawFNToRecFN.scala:58:16] output [32:0] io_out, // @[RoundAnyRawFNToRecFN.scala:58:16] output [4:0] io_exceptionFlags // @[RoundAnyRawFNToRecFN.scala:58:16] ); wire io_invalidExc_0 = io_invalidExc; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isNaN_0 = io_in_isNaN; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isInf_0 = io_in_isInf; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_isZero_0 = io_in_isZero; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_in_sign_0 = io_in_sign; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [9:0] io_in_sExp_0 = io_in_sExp; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [26:0] io_in_sig_0 = io_in_sig; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [8:0] _expOut_T_4 = 9'h194; // @[RoundAnyRawFNToRecFN.scala:258:19] wire [15:0] _roundMask_T_5 = 16'hFF; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_4 = 16'hFF00; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_10 = 16'hFF00; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_13 = 12'hFF; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_14 = 16'hFF0; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_15 = 16'hF0F; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_20 = 16'hF0F0; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_23 = 14'hF0F; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_24 = 16'h3C3C; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_25 = 16'h3333; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_30 = 16'hCCCC; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_33 = 15'h3333; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_34 = 16'h6666; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_35 = 16'h5555; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_40 = 16'hAAAA; // @[primitives.scala:77:20] wire [25:0] _roundedSig_T_15 = 26'h0; // @[RoundAnyRawFNToRecFN.scala:181:24] wire [8:0] _expOut_T_6 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14] wire [8:0] _expOut_T_9 = 9'h1FF; // @[RoundAnyRawFNToRecFN.scala:257:14, :261:14] wire [8:0] _expOut_T_5 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:257:18] wire [8:0] _expOut_T_8 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:261:18] wire [8:0] _expOut_T_14 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:269:16] wire [8:0] _expOut_T_16 = 9'h0; // @[RoundAnyRawFNToRecFN.scala:273:16] wire [22:0] _fractOut_T_4 = 23'h0; // @[RoundAnyRawFNToRecFN.scala:284:13] wire io_detectTininess = 1'h1; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_near_even = 1'h1; // @[RoundAnyRawFNToRecFN.scala:90:53] wire _roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:169:38] wire _unboundedRange_roundIncr_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:207:38] wire _common_underflow_T_7 = 1'h1; // @[RoundAnyRawFNToRecFN.scala:222:49] wire _overflow_roundMagUp_T = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:32] wire overflow_roundMagUp = 1'h1; // @[RoundAnyRawFNToRecFN.scala:243:60] wire [2:0] io_roundingMode = 3'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire io_infiniteExc = 1'h0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire roundingMode_minMag = 1'h0; // 
@[RoundAnyRawFNToRecFN.scala:91:53] wire roundingMode_min = 1'h0; // @[RoundAnyRawFNToRecFN.scala:92:53] wire roundingMode_max = 1'h0; // @[RoundAnyRawFNToRecFN.scala:93:53] wire roundingMode_near_maxMag = 1'h0; // @[RoundAnyRawFNToRecFN.scala:94:53] wire roundingMode_odd = 1'h0; // @[RoundAnyRawFNToRecFN.scala:95:53] wire _roundMagUp_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:27] wire _roundMagUp_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:63] wire roundMagUp = 1'h0; // @[RoundAnyRawFNToRecFN.scala:98:42] wire _roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:171:29] wire _roundedSig_T_13 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:181:42] wire _unboundedRange_roundIncr_T_2 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:209:29] wire _pegMinNonzeroMagOut_T_1 = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:60] wire pegMinNonzeroMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:245:45] wire _pegMaxFiniteMagOut_T = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:42] wire pegMaxFiniteMagOut = 1'h0; // @[RoundAnyRawFNToRecFN.scala:246:39] wire notNaN_isSpecialInfOut = io_in_isInf_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :236:49] wire [26:0] adjustedSig = io_in_sig_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :114:22] wire [32:0] _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:286:33] wire [4:0] _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:288:66] wire [32:0] io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire [4:0] io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] wire _roundMagUp_T_1 = ~io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :98:66] wire doShiftSigDown1 = adjustedSig[26]; // @[RoundAnyRawFNToRecFN.scala:114:22, :120:57] wire [8:0] _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:187:37] wire [8:0] common_expOut; // @[RoundAnyRawFNToRecFN.scala:122:31] wire [22:0] _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:189:16] wire [22:0] common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31] wire _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:196:50] wire common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37] wire _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:200:31] wire common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37] wire _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:217:40] wire common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37] wire _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:230:49] wire common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37] wire [8:0] _roundMask_T = io_in_sExp_0[8:0]; // @[RoundAnyRawFNToRecFN.scala:48:5, :156:37] wire [8:0] _roundMask_T_1 = ~_roundMask_T; // @[primitives.scala:52:21] wire roundMask_msb = _roundMask_T_1[8]; // @[primitives.scala:52:21, :58:25] wire [7:0] roundMask_lsbs = _roundMask_T_1[7:0]; // @[primitives.scala:52:21, :59:26] wire roundMask_msb_1 = roundMask_lsbs[7]; // @[primitives.scala:58:25, :59:26] wire [6:0] roundMask_lsbs_1 = roundMask_lsbs[6:0]; // @[primitives.scala:59:26] wire roundMask_msb_2 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26] wire roundMask_msb_3 = roundMask_lsbs_1[6]; // @[primitives.scala:58:25, :59:26] wire [5:0] roundMask_lsbs_2 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26] wire [5:0] roundMask_lsbs_3 = roundMask_lsbs_1[5:0]; // @[primitives.scala:59:26] wire [64:0] roundMask_shift = $signed(65'sh10000000000000000 >>> roundMask_lsbs_2); // @[primitives.scala:59:26, :76:56] wire [21:0] _roundMask_T_2 = roundMask_shift[63:42]; // @[primitives.scala:76:56, :78:22] wire [15:0] _roundMask_T_3 = _roundMask_T_2[15:0]; // 
@[primitives.scala:77:20, :78:22] wire [7:0] _roundMask_T_6 = _roundMask_T_3[15:8]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_7 = {8'h0, _roundMask_T_6}; // @[primitives.scala:77:20] wire [7:0] _roundMask_T_8 = _roundMask_T_3[7:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_9 = {_roundMask_T_8, 8'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_11 = _roundMask_T_9 & 16'hFF00; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_12 = _roundMask_T_7 | _roundMask_T_11; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_16 = _roundMask_T_12[15:4]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_17 = {4'h0, _roundMask_T_16 & 12'hF0F}; // @[primitives.scala:77:20] wire [11:0] _roundMask_T_18 = _roundMask_T_12[11:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_19 = {_roundMask_T_18, 4'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_21 = _roundMask_T_19 & 16'hF0F0; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_22 = _roundMask_T_17 | _roundMask_T_21; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_26 = _roundMask_T_22[15:2]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_27 = {2'h0, _roundMask_T_26 & 14'h3333}; // @[primitives.scala:77:20] wire [13:0] _roundMask_T_28 = _roundMask_T_22[13:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_29 = {_roundMask_T_28, 2'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_31 = _roundMask_T_29 & 16'hCCCC; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_32 = _roundMask_T_27 | _roundMask_T_31; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_36 = _roundMask_T_32[15:1]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_37 = {1'h0, _roundMask_T_36 & 15'h5555}; // @[primitives.scala:77:20] wire [14:0] _roundMask_T_38 = _roundMask_T_32[14:0]; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_39 = {_roundMask_T_38, 1'h0}; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_41 = _roundMask_T_39 & 16'hAAAA; // @[primitives.scala:77:20] wire [15:0] _roundMask_T_42 = _roundMask_T_37 | _roundMask_T_41; // @[primitives.scala:77:20] wire [5:0] _roundMask_T_43 = _roundMask_T_2[21:16]; // @[primitives.scala:77:20, :78:22] wire [3:0] _roundMask_T_44 = _roundMask_T_43[3:0]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_45 = _roundMask_T_44[1:0]; // @[primitives.scala:77:20] wire _roundMask_T_46 = _roundMask_T_45[0]; // @[primitives.scala:77:20] wire _roundMask_T_47 = _roundMask_T_45[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_48 = {_roundMask_T_46, _roundMask_T_47}; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_49 = _roundMask_T_44[3:2]; // @[primitives.scala:77:20] wire _roundMask_T_50 = _roundMask_T_49[0]; // @[primitives.scala:77:20] wire _roundMask_T_51 = _roundMask_T_49[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_52 = {_roundMask_T_50, _roundMask_T_51}; // @[primitives.scala:77:20] wire [3:0] _roundMask_T_53 = {_roundMask_T_48, _roundMask_T_52}; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_54 = _roundMask_T_43[5:4]; // @[primitives.scala:77:20] wire _roundMask_T_55 = _roundMask_T_54[0]; // @[primitives.scala:77:20] wire _roundMask_T_56 = _roundMask_T_54[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_57 = {_roundMask_T_55, _roundMask_T_56}; // @[primitives.scala:77:20] wire [5:0] _roundMask_T_58 = {_roundMask_T_53, _roundMask_T_57}; // @[primitives.scala:77:20] wire [21:0] _roundMask_T_59 = {_roundMask_T_42, _roundMask_T_58}; // @[primitives.scala:77:20] wire [21:0] _roundMask_T_60 = 
~_roundMask_T_59; // @[primitives.scala:73:32, :77:20] wire [21:0] _roundMask_T_61 = roundMask_msb_2 ? 22'h0 : _roundMask_T_60; // @[primitives.scala:58:25, :73:{21,32}] wire [21:0] _roundMask_T_62 = ~_roundMask_T_61; // @[primitives.scala:73:{17,21}] wire [24:0] _roundMask_T_63 = {_roundMask_T_62, 3'h7}; // @[primitives.scala:68:58, :73:17] wire [64:0] roundMask_shift_1 = $signed(65'sh10000000000000000 >>> roundMask_lsbs_3); // @[primitives.scala:59:26, :76:56] wire [2:0] _roundMask_T_64 = roundMask_shift_1[2:0]; // @[primitives.scala:76:56, :78:22] wire [1:0] _roundMask_T_65 = _roundMask_T_64[1:0]; // @[primitives.scala:77:20, :78:22] wire _roundMask_T_66 = _roundMask_T_65[0]; // @[primitives.scala:77:20] wire _roundMask_T_67 = _roundMask_T_65[1]; // @[primitives.scala:77:20] wire [1:0] _roundMask_T_68 = {_roundMask_T_66, _roundMask_T_67}; // @[primitives.scala:77:20] wire _roundMask_T_69 = _roundMask_T_64[2]; // @[primitives.scala:77:20, :78:22] wire [2:0] _roundMask_T_70 = {_roundMask_T_68, _roundMask_T_69}; // @[primitives.scala:77:20] wire [2:0] _roundMask_T_71 = roundMask_msb_3 ? _roundMask_T_70 : 3'h0; // @[primitives.scala:58:25, :62:24, :77:20] wire [24:0] _roundMask_T_72 = roundMask_msb_1 ? _roundMask_T_63 : {22'h0, _roundMask_T_71}; // @[primitives.scala:58:25, :62:24, :67:24, :68:58] wire [24:0] _roundMask_T_73 = roundMask_msb ? _roundMask_T_72 : 25'h0; // @[primitives.scala:58:25, :62:24, :67:24] wire [24:0] _roundMask_T_74 = {_roundMask_T_73[24:1], _roundMask_T_73[0] | doShiftSigDown1}; // @[primitives.scala:62:24] wire [26:0] roundMask = {_roundMask_T_74, 2'h3}; // @[RoundAnyRawFNToRecFN.scala:159:{23,42}] wire [27:0] _shiftedRoundMask_T = {1'h0, roundMask}; // @[RoundAnyRawFNToRecFN.scala:159:42, :162:41] wire [26:0] shiftedRoundMask = _shiftedRoundMask_T[27:1]; // @[RoundAnyRawFNToRecFN.scala:162:{41,53}] wire [26:0] _roundPosMask_T = ~shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:162:53, :163:28] wire [26:0] roundPosMask = _roundPosMask_T & roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :163:{28,46}] wire [26:0] _roundPosBit_T = adjustedSig & roundPosMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :163:46, :164:40] wire roundPosBit = |_roundPosBit_T; // @[RoundAnyRawFNToRecFN.scala:164:{40,56}] wire _roundIncr_T_1 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :169:67] wire _roundedSig_T_3 = roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :175:49] wire [26:0] _anyRoundExtra_T = adjustedSig & shiftedRoundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :162:53, :165:42] wire anyRoundExtra = |_anyRoundExtra_T; // @[RoundAnyRawFNToRecFN.scala:165:{42,62}] wire anyRound = roundPosBit | anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:164:56, :165:62, :166:36] wire roundIncr = _roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:169:67, :170:31] wire [26:0] _roundedSig_T = adjustedSig | roundMask; // @[RoundAnyRawFNToRecFN.scala:114:22, :159:42, :174:32] wire [24:0] _roundedSig_T_1 = _roundedSig_T[26:2]; // @[RoundAnyRawFNToRecFN.scala:174:{32,44}] wire [25:0] _roundedSig_T_2 = {1'h0, _roundedSig_T_1} + 26'h1; // @[RoundAnyRawFNToRecFN.scala:174:{44,49}] wire _roundedSig_T_4 = ~anyRoundExtra; // @[RoundAnyRawFNToRecFN.scala:165:62, :176:30] wire _roundedSig_T_5 = _roundedSig_T_3 & _roundedSig_T_4; // @[RoundAnyRawFNToRecFN.scala:175:{49,64}, :176:30] wire [25:0] _roundedSig_T_6 = roundMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:159:42, :177:35] wire [25:0] _roundedSig_T_7 = _roundedSig_T_5 ? 
_roundedSig_T_6 : 26'h0; // @[RoundAnyRawFNToRecFN.scala:175:{25,64}, :177:35] wire [25:0] _roundedSig_T_8 = ~_roundedSig_T_7; // @[RoundAnyRawFNToRecFN.scala:175:{21,25}] wire [25:0] _roundedSig_T_9 = _roundedSig_T_2 & _roundedSig_T_8; // @[RoundAnyRawFNToRecFN.scala:174:{49,57}, :175:21] wire [26:0] _roundedSig_T_10 = ~roundMask; // @[RoundAnyRawFNToRecFN.scala:159:42, :180:32] wire [26:0] _roundedSig_T_11 = adjustedSig & _roundedSig_T_10; // @[RoundAnyRawFNToRecFN.scala:114:22, :180:{30,32}] wire [24:0] _roundedSig_T_12 = _roundedSig_T_11[26:2]; // @[RoundAnyRawFNToRecFN.scala:180:{30,43}] wire [25:0] _roundedSig_T_14 = roundPosMask[26:1]; // @[RoundAnyRawFNToRecFN.scala:163:46, :181:67] wire [25:0] _roundedSig_T_16 = {1'h0, _roundedSig_T_12}; // @[RoundAnyRawFNToRecFN.scala:180:{43,47}] wire [25:0] roundedSig = roundIncr ? _roundedSig_T_9 : _roundedSig_T_16; // @[RoundAnyRawFNToRecFN.scala:170:31, :173:16, :174:57, :180:47] wire [1:0] _sRoundedExp_T = roundedSig[25:24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :185:54] wire [2:0] _sRoundedExp_T_1 = {1'h0, _sRoundedExp_T}; // @[RoundAnyRawFNToRecFN.scala:185:{54,76}] wire [10:0] sRoundedExp = {io_in_sExp_0[9], io_in_sExp_0} + {{8{_sRoundedExp_T_1[2]}}, _sRoundedExp_T_1}; // @[RoundAnyRawFNToRecFN.scala:48:5, :185:{40,76}] assign _common_expOut_T = sRoundedExp[8:0]; // @[RoundAnyRawFNToRecFN.scala:185:40, :187:37] assign common_expOut = _common_expOut_T; // @[RoundAnyRawFNToRecFN.scala:122:31, :187:37] wire [22:0] _common_fractOut_T = roundedSig[23:1]; // @[RoundAnyRawFNToRecFN.scala:173:16, :190:27] wire [22:0] _common_fractOut_T_1 = roundedSig[22:0]; // @[RoundAnyRawFNToRecFN.scala:173:16, :191:27] assign _common_fractOut_T_2 = doShiftSigDown1 ? _common_fractOut_T : _common_fractOut_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :189:16, :190:27, :191:27] assign common_fractOut = _common_fractOut_T_2; // @[RoundAnyRawFNToRecFN.scala:123:31, :189:16] wire [3:0] _common_overflow_T = sRoundedExp[10:7]; // @[RoundAnyRawFNToRecFN.scala:185:40, :196:30] assign _common_overflow_T_1 = $signed(_common_overflow_T) > 4'sh2; // @[RoundAnyRawFNToRecFN.scala:196:{30,50}] assign common_overflow = _common_overflow_T_1; // @[RoundAnyRawFNToRecFN.scala:124:37, :196:50] assign _common_totalUnderflow_T = $signed(sRoundedExp) < 11'sh6B; // @[RoundAnyRawFNToRecFN.scala:185:40, :200:31] assign common_totalUnderflow = _common_totalUnderflow_T; // @[RoundAnyRawFNToRecFN.scala:125:37, :200:31] wire _unboundedRange_roundPosBit_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45] wire _unboundedRange_anyRound_T = adjustedSig[2]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:45, :205:44] wire _unboundedRange_roundPosBit_T_1 = adjustedSig[1]; // @[RoundAnyRawFNToRecFN.scala:114:22, :203:61] wire unboundedRange_roundPosBit = doShiftSigDown1 ? 
_unboundedRange_roundPosBit_T : _unboundedRange_roundPosBit_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :203:{16,45,61}] wire _unboundedRange_roundIncr_T_1 = unboundedRange_roundPosBit; // @[RoundAnyRawFNToRecFN.scala:203:16, :207:67] wire _unboundedRange_anyRound_T_1 = doShiftSigDown1 & _unboundedRange_anyRound_T; // @[RoundAnyRawFNToRecFN.scala:120:57, :205:{30,44}] wire [1:0] _unboundedRange_anyRound_T_2 = adjustedSig[1:0]; // @[RoundAnyRawFNToRecFN.scala:114:22, :205:63] wire _unboundedRange_anyRound_T_3 = |_unboundedRange_anyRound_T_2; // @[RoundAnyRawFNToRecFN.scala:205:{63,70}] wire unboundedRange_anyRound = _unboundedRange_anyRound_T_1 | _unboundedRange_anyRound_T_3; // @[RoundAnyRawFNToRecFN.scala:205:{30,49,70}] wire unboundedRange_roundIncr = _unboundedRange_roundIncr_T_1; // @[RoundAnyRawFNToRecFN.scala:207:67, :208:46] wire _roundCarry_T = roundedSig[25]; // @[RoundAnyRawFNToRecFN.scala:173:16, :212:27] wire _roundCarry_T_1 = roundedSig[24]; // @[RoundAnyRawFNToRecFN.scala:173:16, :213:27] wire roundCarry = doShiftSigDown1 ? _roundCarry_T : _roundCarry_T_1; // @[RoundAnyRawFNToRecFN.scala:120:57, :211:16, :212:27, :213:27] wire [1:0] _common_underflow_T = io_in_sExp_0[9:8]; // @[RoundAnyRawFNToRecFN.scala:48:5, :220:49] wire _common_underflow_T_1 = _common_underflow_T != 2'h1; // @[RoundAnyRawFNToRecFN.scala:220:{49,64}] wire _common_underflow_T_2 = anyRound & _common_underflow_T_1; // @[RoundAnyRawFNToRecFN.scala:166:36, :220:{32,64}] wire _common_underflow_T_3 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57] wire _common_underflow_T_9 = roundMask[3]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:57, :225:49] wire _common_underflow_T_4 = roundMask[2]; // @[RoundAnyRawFNToRecFN.scala:159:42, :221:71] wire _common_underflow_T_5 = doShiftSigDown1 ? _common_underflow_T_3 : _common_underflow_T_4; // @[RoundAnyRawFNToRecFN.scala:120:57, :221:{30,57,71}] wire _common_underflow_T_6 = _common_underflow_T_2 & _common_underflow_T_5; // @[RoundAnyRawFNToRecFN.scala:220:{32,72}, :221:30] wire _common_underflow_T_8 = roundMask[4]; // @[RoundAnyRawFNToRecFN.scala:159:42, :224:49] wire _common_underflow_T_10 = doShiftSigDown1 ? 
_common_underflow_T_8 : _common_underflow_T_9; // @[RoundAnyRawFNToRecFN.scala:120:57, :223:39, :224:49, :225:49] wire _common_underflow_T_11 = ~_common_underflow_T_10; // @[RoundAnyRawFNToRecFN.scala:223:{34,39}] wire _common_underflow_T_12 = _common_underflow_T_11; // @[RoundAnyRawFNToRecFN.scala:222:77, :223:34] wire _common_underflow_T_13 = _common_underflow_T_12 & roundCarry; // @[RoundAnyRawFNToRecFN.scala:211:16, :222:77, :226:38] wire _common_underflow_T_14 = _common_underflow_T_13 & roundPosBit; // @[RoundAnyRawFNToRecFN.scala:164:56, :226:38, :227:45] wire _common_underflow_T_15 = _common_underflow_T_14 & unboundedRange_roundIncr; // @[RoundAnyRawFNToRecFN.scala:208:46, :227:{45,60}] wire _common_underflow_T_16 = ~_common_underflow_T_15; // @[RoundAnyRawFNToRecFN.scala:222:27, :227:60] wire _common_underflow_T_17 = _common_underflow_T_6 & _common_underflow_T_16; // @[RoundAnyRawFNToRecFN.scala:220:72, :221:76, :222:27] assign _common_underflow_T_18 = common_totalUnderflow | _common_underflow_T_17; // @[RoundAnyRawFNToRecFN.scala:125:37, :217:40, :221:76] assign common_underflow = _common_underflow_T_18; // @[RoundAnyRawFNToRecFN.scala:126:37, :217:40] assign _common_inexact_T = common_totalUnderflow | anyRound; // @[RoundAnyRawFNToRecFN.scala:125:37, :166:36, :230:49] assign common_inexact = _common_inexact_T; // @[RoundAnyRawFNToRecFN.scala:127:37, :230:49] wire isNaNOut = io_invalidExc_0 | io_in_isNaN_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34] wire _commonCase_T = ~isNaNOut; // @[RoundAnyRawFNToRecFN.scala:235:34, :237:22] wire _commonCase_T_1 = ~notNaN_isSpecialInfOut; // @[RoundAnyRawFNToRecFN.scala:236:49, :237:36] wire _commonCase_T_2 = _commonCase_T & _commonCase_T_1; // @[RoundAnyRawFNToRecFN.scala:237:{22,33,36}] wire _commonCase_T_3 = ~io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :237:64] wire commonCase = _commonCase_T_2 & _commonCase_T_3; // @[RoundAnyRawFNToRecFN.scala:237:{33,61,64}] wire overflow = commonCase & common_overflow; // @[RoundAnyRawFNToRecFN.scala:124:37, :237:61, :238:32] wire _notNaN_isInfOut_T = overflow; // @[RoundAnyRawFNToRecFN.scala:238:32, :248:45] wire underflow = commonCase & common_underflow; // @[RoundAnyRawFNToRecFN.scala:126:37, :237:61, :239:32] wire _inexact_T = commonCase & common_inexact; // @[RoundAnyRawFNToRecFN.scala:127:37, :237:61, :240:43] wire inexact = overflow | _inexact_T; // @[RoundAnyRawFNToRecFN.scala:238:32, :240:{28,43}] wire _pegMinNonzeroMagOut_T = commonCase & common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :237:61, :245:20] wire notNaN_isInfOut = notNaN_isSpecialInfOut | _notNaN_isInfOut_T; // @[RoundAnyRawFNToRecFN.scala:236:49, :248:{32,45}] wire signOut = ~isNaNOut & io_in_sign_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :250:22] wire _expOut_T = io_in_isZero_0 | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:48:5, :125:37, :253:32] wire [8:0] _expOut_T_1 = _expOut_T ? 
9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:253:{18,32}] wire [8:0] _expOut_T_2 = ~_expOut_T_1; // @[RoundAnyRawFNToRecFN.scala:253:{14,18}] wire [8:0] _expOut_T_3 = common_expOut & _expOut_T_2; // @[RoundAnyRawFNToRecFN.scala:122:31, :252:24, :253:14] wire [8:0] _expOut_T_7 = _expOut_T_3; // @[RoundAnyRawFNToRecFN.scala:252:24, :256:17] wire [8:0] _expOut_T_10 = _expOut_T_7; // @[RoundAnyRawFNToRecFN.scala:256:17, :260:17] wire [8:0] _expOut_T_11 = {2'h0, notNaN_isInfOut, 6'h0}; // @[RoundAnyRawFNToRecFN.scala:248:32, :265:18] wire [8:0] _expOut_T_12 = ~_expOut_T_11; // @[RoundAnyRawFNToRecFN.scala:265:{14,18}] wire [8:0] _expOut_T_13 = _expOut_T_10 & _expOut_T_12; // @[RoundAnyRawFNToRecFN.scala:260:17, :264:17, :265:14] wire [8:0] _expOut_T_15 = _expOut_T_13; // @[RoundAnyRawFNToRecFN.scala:264:17, :268:18] wire [8:0] _expOut_T_17 = _expOut_T_15; // @[RoundAnyRawFNToRecFN.scala:268:18, :272:15] wire [8:0] _expOut_T_18 = notNaN_isInfOut ? 9'h180 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:248:32, :277:16] wire [8:0] _expOut_T_19 = _expOut_T_17 | _expOut_T_18; // @[RoundAnyRawFNToRecFN.scala:272:15, :276:15, :277:16] wire [8:0] _expOut_T_20 = isNaNOut ? 9'h1C0 : 9'h0; // @[RoundAnyRawFNToRecFN.scala:235:34, :278:16] wire [8:0] expOut = _expOut_T_19 | _expOut_T_20; // @[RoundAnyRawFNToRecFN.scala:276:15, :277:73, :278:16] wire _fractOut_T = isNaNOut | io_in_isZero_0; // @[RoundAnyRawFNToRecFN.scala:48:5, :235:34, :280:22] wire _fractOut_T_1 = _fractOut_T | common_totalUnderflow; // @[RoundAnyRawFNToRecFN.scala:125:37, :280:{22,38}] wire [22:0] _fractOut_T_2 = {isNaNOut, 22'h0}; // @[RoundAnyRawFNToRecFN.scala:235:34, :281:16] wire [22:0] _fractOut_T_3 = _fractOut_T_1 ? _fractOut_T_2 : common_fractOut; // @[RoundAnyRawFNToRecFN.scala:123:31, :280:{12,38}, :281:16] wire [22:0] fractOut = _fractOut_T_3; // @[RoundAnyRawFNToRecFN.scala:280:12, :283:11] wire [9:0] _io_out_T = {signOut, expOut}; // @[RoundAnyRawFNToRecFN.scala:250:22, :277:73, :286:23] assign _io_out_T_1 = {_io_out_T, fractOut}; // @[RoundAnyRawFNToRecFN.scala:283:11, :286:{23,33}] assign io_out_0 = _io_out_T_1; // @[RoundAnyRawFNToRecFN.scala:48:5, :286:33] wire [1:0] _io_exceptionFlags_T = {io_invalidExc_0, 1'h0}; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:23] wire [2:0] _io_exceptionFlags_T_1 = {_io_exceptionFlags_T, overflow}; // @[RoundAnyRawFNToRecFN.scala:238:32, :288:{23,41}] wire [3:0] _io_exceptionFlags_T_2 = {_io_exceptionFlags_T_1, underflow}; // @[RoundAnyRawFNToRecFN.scala:239:32, :288:{41,53}] assign _io_exceptionFlags_T_3 = {_io_exceptionFlags_T_2, inexact}; // @[RoundAnyRawFNToRecFN.scala:240:28, :288:{53,66}] assign io_exceptionFlags_0 = _io_exceptionFlags_T_3; // @[RoundAnyRawFNToRecFN.scala:48:5, :288:66] assign io_out = io_out_0; // @[RoundAnyRawFNToRecFN.scala:48:5] assign io_exceptionFlags = io_exceptionFlags_0; // @[RoundAnyRawFNToRecFN.scala:48:5] endmodule
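For reference, the Verilog above is the e8/s24 (single-precision recoded) configuration of the RoundRawFNToRecFN wrapper shown earlier: the wrapper forwards its IO verbatim to RoundAnyRawFNToRecFN(expWidth, sigWidth + 2, expWidth, sigWidth, options), which is the module that gets emitted. The snippet below is a minimal, illustrative Chisel sketch of driving that wrapper; the enclosing module name RoundingExample and the parameter choice options = 0 are assumptions made for illustration only, and it assumes the hardfloat definitions above (RoundRawFNToRecFN, RawFloat) are in scope.

import chisel3._

// Illustrative sketch (not part of the generated output): instantiate the
// RoundRawFNToRecFN wrapper for expWidth = 8, sigWidth = 24 and pass its IO through.
class RoundingExample extends Module {
  val io = IO(new Bundle {
    val invalidExc     = Input(Bool())
    val infiniteExc    = Input(Bool())
    val in             = Input(new RawFloat(8, 26))  // matches the wrapper's RawFloat(expWidth, sigWidth + 2)
    val roundingMode   = Input(UInt(3.W))
    val detectTininess = Input(UInt(1.W))
    val out            = Output(Bits(33.W))          // expWidth + sigWidth + 1 = 33
    val exceptionFlags = Output(Bits(5.W))           // invalid ## infinite ## overflow ## underflow ## inexact
  })

  // options = 0 is an assumed value; every options value routes through the same wiring.
  val rounder = Module(new RoundRawFNToRecFN(8, 24, 0))
  rounder.io.invalidExc     := io.invalidExc
  rounder.io.infiniteExc    := io.infiniteExc
  rounder.io.in             := io.in
  rounder.io.roundingMode   := io.roundingMode
  rounder.io.detectTininess := io.detectTininess
  io.out            := rounder.io.out
  io.exceptionFlags := rounder.io.exceptionFlags
}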
Generate the Verilog code corresponding to the following Chisel files. File Monitor.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceLine import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy._ import freechips.rocketchip.diplomacy.EnableMonitors import freechips.rocketchip.formal.{MonitorDirection, IfThen, Property, PropertyClass, TestplanTestType, TLMonitorStrictMode} import freechips.rocketchip.util.PlusArg case class TLMonitorArgs(edge: TLEdge) abstract class TLMonitorBase(args: TLMonitorArgs) extends Module { val io = IO(new Bundle { val in = Input(new TLBundle(args.edge.bundle)) }) def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit legalize(io.in, args.edge, reset) } object TLMonitor { def apply(enable: Boolean, node: TLNode)(implicit p: Parameters): TLNode = { if (enable) { EnableMonitors { implicit p => node := TLEphemeralNode()(ValName("monitor")) } } else { node } } } class TLMonitor(args: TLMonitorArgs, monitorDir: MonitorDirection = MonitorDirection.Monitor) extends TLMonitorBase(args) { require (args.edge.params(TLMonitorStrictMode) || (! args.edge.params(TestplanTestType).formal)) val cover_prop_class = PropertyClass.Default //Like assert but can flip to being an assumption for formal verification def monAssert(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir, cond, message, PropertyClass.Default) } def assume(cond: Bool, message: String): Unit = if (monitorDir == MonitorDirection.Monitor) { assert(cond, message) } else { Property(monitorDir.flip, cond, message, PropertyClass.Default) } def extra = { args.edge.sourceInfo match { case SourceLine(filename, line, col) => s" (connected at $filename:$line:$col)" case _ => "" } } def visible(address: UInt, source: UInt, edge: TLEdge) = edge.client.clients.map { c => !c.sourceId.contains(source) || c.visibility.map(_.contains(address)).reduce(_ || _) }.reduce(_ && _) def legalizeFormatA(bundle: TLBundleA, edge: TLEdge): Unit = { //switch this flag to turn on diplomacy in error messages def diplomacyInfo = if (true) "" else "\nThe diplomacy information for the edge is as follows:\n" + edge.formatEdge + "\n" monAssert (TLMessages.isA(bundle.opcode), "'A' channel has invalid opcode" + extra) // Reuse these subexpressions to save some firrtl lines val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) monAssert (visible(edge.address(bundle), bundle.source, edge), "'A' channel carries an address illegal for the specified bank visibility") //The monitor doesn’t check for acquire T vs acquire B, it assumes that acquire B implies acquire T and only checks for acquire B //TODO: check for acquireT? 
when (bundle.opcode === TLMessages.AcquireBlock) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquireBlock from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquireBlock carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquireBlock smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquireBlock address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquireBlock carries invalid grow param" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquireBlock contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquireBlock is corrupt" + extra) } when (bundle.opcode === TLMessages.AcquirePerm) { monAssert (edge.master.emitsAcquireB(bundle.source, bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'A' channel carries AcquirePerm from a client which does not support Probe" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel AcquirePerm carries invalid source ID" + diplomacyInfo + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'A' channel AcquirePerm smaller than a beat" + extra) monAssert (is_aligned, "'A' channel AcquirePerm address not aligned to size" + extra) monAssert (TLPermissions.isGrow(bundle.param), "'A' channel AcquirePerm carries invalid grow param" + extra) monAssert (bundle.param =/= TLPermissions.NtoB, "'A' channel AcquirePerm requests NtoB" + extra) monAssert (~bundle.mask === 0.U, "'A' channel AcquirePerm contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel AcquirePerm is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.emitsGet(bundle.source, bundle.size), "'A' channel carries Get type which master claims it can't emit" + diplomacyInfo + extra) monAssert (edge.slave.supportsGetSafe(edge.address(bundle), bundle.size, None), "'A' channel carries Get type which slave claims it can't support" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel Get carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.emitsPutFull(bundle.source, bundle.size) && edge.slave.supportsPutFullSafe(edge.address(bundle), bundle.size), "'A' channel carries PutFull type which is unexpected using diplomatic parameters" + diplomacyInfo + extra) monAssert (source_ok, "'A' channel PutFull carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutFull address not aligned to 
size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'A' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.emitsPutPartial(bundle.source, bundle.size) && edge.slave.supportsPutPartialSafe(edge.address(bundle), bundle.size), "'A' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel PutPartial carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'A' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'A' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.emitsArithmetic(bundle.source, bundle.size) && edge.slave.supportsArithmeticSafe(edge.address(bundle), bundle.size), "'A' channel carries Arithmetic type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Arithmetic carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'A' channel Arithmetic carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.emitsLogical(bundle.source, bundle.size) && edge.slave.supportsLogicalSafe(edge.address(bundle), bundle.size), "'A' channel carries Logical type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Logical carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'A' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.emitsHint(bundle.source, bundle.size) && edge.slave.supportsHintSafe(edge.address(bundle), bundle.size), "'A' channel carries Hint type which is unexpected using diplomatic parameters" + extra) monAssert (source_ok, "'A' channel Hint carries invalid source ID" + diplomacyInfo + extra) monAssert (is_aligned, "'A' channel Hint address not aligned to size" + extra) monAssert (TLHints.isHints(bundle.param), "'A' channel Hint carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'A' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'A' channel Hint is corrupt" + extra) } } def legalizeFormatB(bundle: TLBundleB, edge: TLEdge): Unit = { monAssert (TLMessages.isB(bundle.opcode), "'B' channel has invalid opcode" + extra) monAssert (visible(edge.address(bundle), bundle.source, edge), "'B' channel carries an address illegal for the specified bank visibility") // Reuse these subexpressions to save some firrtl lines val address_ok = edge.manager.containsSafe(edge.address(bundle)) val is_aligned = edge.isAligned(bundle.address, bundle.size) val mask = edge.full_mask(bundle) val legal_source = Mux1H(edge.client.find(bundle.source), edge.client.clients.map(c => c.sourceId.start.U)) === bundle.source when (bundle.opcode === TLMessages.Probe) { assume 
(edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'B' channel carries Probe type which is unexpected using diplomatic parameters" + extra) assume (address_ok, "'B' channel Probe carries unmanaged address" + extra) assume (legal_source, "'B' channel Probe carries source that is not first source" + extra) assume (is_aligned, "'B' channel Probe address not aligned to size" + extra) assume (TLPermissions.isCap(bundle.param), "'B' channel Probe carries invalid cap param" + extra) assume (bundle.mask === mask, "'B' channel Probe contains invalid mask" + extra) assume (!bundle.corrupt, "'B' channel Probe is corrupt" + extra) } when (bundle.opcode === TLMessages.Get) { monAssert (edge.master.supportsGet(edge.source(bundle), bundle.size) && edge.slave.emitsGetSafe(edge.address(bundle), bundle.size), "'B' channel carries Get type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel Get carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Get carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Get address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel Get carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel Get contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Get is corrupt" + extra) } when (bundle.opcode === TLMessages.PutFullData) { monAssert (edge.master.supportsPutFull(edge.source(bundle), bundle.size) && edge.slave.emitsPutFullSafe(edge.address(bundle), bundle.size), "'B' channel carries PutFull type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutFull carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutFull carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutFull address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutFull carries invalid param" + extra) monAssert (bundle.mask === mask, "'B' channel PutFull contains invalid mask" + extra) } when (bundle.opcode === TLMessages.PutPartialData) { monAssert (edge.master.supportsPutPartial(edge.source(bundle), bundle.size) && edge.slave.emitsPutPartialSafe(edge.address(bundle), bundle.size), "'B' channel carries PutPartial type which is unexpected using diplomatic parameters" + extra) monAssert (address_ok, "'B' channel PutPartial carries unmanaged address" + extra) monAssert (legal_source, "'B' channel PutPartial carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel PutPartial address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'B' channel PutPartial carries invalid param" + extra) monAssert ((bundle.mask & ~mask) === 0.U, "'B' channel PutPartial contains invalid mask" + extra) } when (bundle.opcode === TLMessages.ArithmeticData) { monAssert (edge.master.supportsArithmetic(edge.source(bundle), bundle.size) && edge.slave.emitsArithmeticSafe(edge.address(bundle), bundle.size), "'B' channel carries Arithmetic type unsupported by master" + extra) monAssert (address_ok, "'B' channel Arithmetic carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Arithmetic carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Arithmetic address not aligned to size" + extra) monAssert (TLAtomics.isArithmetic(bundle.param), "'B' channel Arithmetic carries invalid opcode param" + 
extra) monAssert (bundle.mask === mask, "'B' channel Arithmetic contains invalid mask" + extra) } when (bundle.opcode === TLMessages.LogicalData) { monAssert (edge.master.supportsLogical(edge.source(bundle), bundle.size) && edge.slave.emitsLogicalSafe(edge.address(bundle), bundle.size), "'B' channel carries Logical type unsupported by client" + extra) monAssert (address_ok, "'B' channel Logical carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Logical carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Logical address not aligned to size" + extra) monAssert (TLAtomics.isLogical(bundle.param), "'B' channel Logical carries invalid opcode param" + extra) monAssert (bundle.mask === mask, "'B' channel Logical contains invalid mask" + extra) } when (bundle.opcode === TLMessages.Hint) { monAssert (edge.master.supportsHint(edge.source(bundle), bundle.size) && edge.slave.emitsHintSafe(edge.address(bundle), bundle.size), "'B' channel carries Hint type unsupported by client" + extra) monAssert (address_ok, "'B' channel Hint carries unmanaged address" + extra) monAssert (legal_source, "'B' channel Hint carries source that is not first source" + extra) monAssert (is_aligned, "'B' channel Hint address not aligned to size" + extra) monAssert (bundle.mask === mask, "'B' channel Hint contains invalid mask" + extra) monAssert (!bundle.corrupt, "'B' channel Hint is corrupt" + extra) } } def legalizeFormatC(bundle: TLBundleC, edge: TLEdge): Unit = { monAssert (TLMessages.isC(bundle.opcode), "'C' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val is_aligned = edge.isAligned(bundle.address, bundle.size) val address_ok = edge.manager.containsSafe(edge.address(bundle)) monAssert (visible(edge.address(bundle), bundle.source, edge), "'C' channel carries an address illegal for the specified bank visibility") when (bundle.opcode === TLMessages.ProbeAck) { monAssert (address_ok, "'C' channel ProbeAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAck carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAck smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAck address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAck carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel ProbeAck is corrupt" + extra) } when (bundle.opcode === TLMessages.ProbeAckData) { monAssert (address_ok, "'C' channel ProbeAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel ProbeAckData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ProbeAckData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ProbeAckData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ProbeAckData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.Release) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries Release type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel 
Release carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel Release smaller than a beat" + extra) monAssert (is_aligned, "'C' channel Release address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel Release carries invalid report param" + extra) monAssert (!bundle.corrupt, "'C' channel Release is corrupt" + extra) } when (bundle.opcode === TLMessages.ReleaseData) { monAssert (edge.master.emitsAcquireB(edge.source(bundle), bundle.size) && edge.slave.supportsAcquireBSafe(edge.address(bundle), bundle.size), "'C' channel carries ReleaseData type unsupported by manager" + extra) monAssert (edge.master.supportsProbe(edge.source(bundle), bundle.size) && edge.slave.emitsProbeSafe(edge.address(bundle), bundle.size), "'C' channel carries Release from a client which does not support Probe" + extra) monAssert (source_ok, "'C' channel ReleaseData carries invalid source ID" + extra) monAssert (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'C' channel ReleaseData smaller than a beat" + extra) monAssert (is_aligned, "'C' channel ReleaseData address not aligned to size" + extra) monAssert (TLPermissions.isReport(bundle.param), "'C' channel ReleaseData carries invalid report param" + extra) } when (bundle.opcode === TLMessages.AccessAck) { monAssert (address_ok, "'C' channel AccessAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel AccessAck is corrupt" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { monAssert (address_ok, "'C' channel AccessAckData carries unmanaged address" + extra) monAssert (source_ok, "'C' channel AccessAckData carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel AccessAckData address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel AccessAckData carries invalid param" + extra) } when (bundle.opcode === TLMessages.HintAck) { monAssert (address_ok, "'C' channel HintAck carries unmanaged address" + extra) monAssert (source_ok, "'C' channel HintAck carries invalid source ID" + extra) monAssert (is_aligned, "'C' channel HintAck address not aligned to size" + extra) monAssert (bundle.param === 0.U, "'C' channel HintAck carries invalid param" + extra) monAssert (!bundle.corrupt, "'C' channel HintAck is corrupt" + extra) } } def legalizeFormatD(bundle: TLBundleD, edge: TLEdge): Unit = { assume (TLMessages.isD(bundle.opcode), "'D' channel has invalid opcode" + extra) val source_ok = edge.client.contains(bundle.source) val sink_ok = bundle.sink < edge.manager.endSinkId.U val deny_put_ok = edge.manager.mayDenyPut.B val deny_get_ok = edge.manager.mayDenyGet.B when (bundle.opcode === TLMessages.ReleaseAck) { assume (source_ok, "'D' channel ReleaseAck carries invalid source ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel ReleaseAck smaller than a beat" + extra) assume (bundle.param === 0.U, "'D' channel ReleaseeAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel ReleaseAck is corrupt" + extra) assume (!bundle.denied, "'D' channel ReleaseAck is denied" + extra) } when (bundle.opcode === TLMessages.Grant) { assume (source_ok, "'D' channel Grant carries invalid source ID" + extra) assume (sink_ok, 
"'D' channel Grant carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel Grant smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel Grant carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel Grant carries toN param" + extra) assume (!bundle.corrupt, "'D' channel Grant is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel Grant is denied" + extra) } when (bundle.opcode === TLMessages.GrantData) { assume (source_ok, "'D' channel GrantData carries invalid source ID" + extra) assume (sink_ok, "'D' channel GrantData carries invalid sink ID" + extra) assume (bundle.size >= log2Ceil(edge.manager.beatBytes).U, "'D' channel GrantData smaller than a beat" + extra) assume (TLPermissions.isCap(bundle.param), "'D' channel GrantData carries invalid cap param" + extra) assume (bundle.param =/= TLPermissions.toN, "'D' channel GrantData carries toN param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel GrantData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel GrantData is denied" + extra) } when (bundle.opcode === TLMessages.AccessAck) { assume (source_ok, "'D' channel AccessAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel AccessAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel AccessAck is denied" + extra) } when (bundle.opcode === TLMessages.AccessAckData) { assume (source_ok, "'D' channel AccessAckData carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel AccessAckData carries invalid param" + extra) assume (!bundle.denied || bundle.corrupt, "'D' channel AccessAckData is denied but not corrupt" + extra) assume (deny_get_ok || !bundle.denied, "'D' channel AccessAckData is denied" + extra) } when (bundle.opcode === TLMessages.HintAck) { assume (source_ok, "'D' channel HintAck carries invalid source ID" + extra) // size is ignored assume (bundle.param === 0.U, "'D' channel HintAck carries invalid param" + extra) assume (!bundle.corrupt, "'D' channel HintAck is corrupt" + extra) assume (deny_put_ok || !bundle.denied, "'D' channel HintAck is denied" + extra) } } def legalizeFormatE(bundle: TLBundleE, edge: TLEdge): Unit = { val sink_ok = bundle.sink < edge.manager.endSinkId.U monAssert (sink_ok, "'E' channels carries invalid sink ID" + extra) } def legalizeFormat(bundle: TLBundle, edge: TLEdge) = { when (bundle.a.valid) { legalizeFormatA(bundle.a.bits, edge) } when (bundle.d.valid) { legalizeFormatD(bundle.d.bits, edge) } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { when (bundle.b.valid) { legalizeFormatB(bundle.b.bits, edge) } when (bundle.c.valid) { legalizeFormatC(bundle.c.bits, edge) } when (bundle.e.valid) { legalizeFormatE(bundle.e.bits, edge) } } else { monAssert (!bundle.b.valid, "'B' channel valid and not TL-C" + extra) monAssert (!bundle.c.valid, "'C' channel valid and not TL-C" + extra) monAssert (!bundle.e.valid, "'E' channel valid and not TL-C" + extra) } } def legalizeMultibeatA(a: DecoupledIO[TLBundleA], edge: TLEdge): Unit = { val a_first = edge.first(a.bits, a.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (a.valid && !a_first) { monAssert (a.bits.opcode === opcode, "'A' channel opcode 
changed within multibeat operation" + extra) monAssert (a.bits.param === param, "'A' channel param changed within multibeat operation" + extra) monAssert (a.bits.size === size, "'A' channel size changed within multibeat operation" + extra) monAssert (a.bits.source === source, "'A' channel source changed within multibeat operation" + extra) monAssert (a.bits.address=== address,"'A' channel address changed with multibeat operation" + extra) } when (a.fire && a_first) { opcode := a.bits.opcode param := a.bits.param size := a.bits.size source := a.bits.source address := a.bits.address } } def legalizeMultibeatB(b: DecoupledIO[TLBundleB], edge: TLEdge): Unit = { val b_first = edge.first(b.bits, b.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (b.valid && !b_first) { monAssert (b.bits.opcode === opcode, "'B' channel opcode changed within multibeat operation" + extra) monAssert (b.bits.param === param, "'B' channel param changed within multibeat operation" + extra) monAssert (b.bits.size === size, "'B' channel size changed within multibeat operation" + extra) monAssert (b.bits.source === source, "'B' channel source changed within multibeat operation" + extra) monAssert (b.bits.address=== address,"'B' channel addresss changed with multibeat operation" + extra) } when (b.fire && b_first) { opcode := b.bits.opcode param := b.bits.param size := b.bits.size source := b.bits.source address := b.bits.address } } def legalizeADSourceFormal(bundle: TLBundle, edge: TLEdge): Unit = { // Symbolic variable val sym_source = Wire(UInt(edge.client.endSourceId.W)) // TODO: Connect sym_source to a fixed value for simulation and to a // free wire in formal sym_source := 0.U // Type casting Int to UInt val maxSourceId = Wire(UInt(edge.client.endSourceId.W)) maxSourceId := edge.client.endSourceId.U // Delayed verison of sym_source val sym_source_d = Reg(UInt(edge.client.endSourceId.W)) sym_source_d := sym_source // These will be constraints for FV setup Property( MonitorDirection.Monitor, (sym_source === sym_source_d), "sym_source should remain stable", PropertyClass.Default) Property( MonitorDirection.Monitor, (sym_source <= maxSourceId), "sym_source should take legal value", PropertyClass.Default) val my_resp_pend = RegInit(false.B) val my_opcode = Reg(UInt()) val my_size = Reg(UInt()) val a_first = bundle.a.valid && edge.first(bundle.a.bits, bundle.a.fire) val d_first = bundle.d.valid && edge.first(bundle.d.bits, bundle.d.fire) val my_a_first_beat = a_first && (bundle.a.bits.source === sym_source) val my_d_first_beat = d_first && (bundle.d.bits.source === sym_source) val my_clr_resp_pend = (bundle.d.fire && my_d_first_beat) val my_set_resp_pend = (bundle.a.fire && my_a_first_beat && !my_clr_resp_pend) when (my_set_resp_pend) { my_resp_pend := true.B } .elsewhen (my_clr_resp_pend) { my_resp_pend := false.B } when (my_a_first_beat) { my_opcode := bundle.a.bits.opcode my_size := bundle.a.bits.size } val my_resp_size = Mux(my_a_first_beat, bundle.a.bits.size, my_size) val my_resp_opcode = Mux(my_a_first_beat, bundle.a.bits.opcode, my_opcode) val my_resp_opcode_legal = Wire(Bool()) when ((my_resp_opcode === TLMessages.Get) || (my_resp_opcode === TLMessages.ArithmeticData) || (my_resp_opcode === TLMessages.LogicalData)) { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.AccessAckData) } .elsewhen ((my_resp_opcode === TLMessages.PutFullData) || (my_resp_opcode === TLMessages.PutPartialData)) { my_resp_opcode_legal := 
(bundle.d.bits.opcode === TLMessages.AccessAck) } .otherwise { my_resp_opcode_legal := (bundle.d.bits.opcode === TLMessages.HintAck) } monAssert (IfThen(my_resp_pend, !my_a_first_beat), "Request message should not be sent with a source ID, for which a response message" + "is already pending (not received until current cycle) for a prior request message" + "with the same source ID" + extra) assume (IfThen(my_clr_resp_pend, (my_set_resp_pend || my_resp_pend)), "Response message should be accepted with a source ID only if a request message with the" + "same source ID has been accepted or is being accepted in the current cycle" + extra) assume (IfThen(my_d_first_beat, (my_a_first_beat || my_resp_pend)), "Response message should be sent with a source ID only if a request message with the" + "same source ID has been accepted or is being sent in the current cycle" + extra) assume (IfThen(my_d_first_beat, (bundle.d.bits.size === my_resp_size)), "If d_valid is 1, then d_size should be same as a_size of the corresponding request" + "message" + extra) assume (IfThen(my_d_first_beat, my_resp_opcode_legal), "If d_valid is 1, then d_opcode should correspond with a_opcode of the corresponding" + "request message" + extra) } def legalizeMultibeatC(c: DecoupledIO[TLBundleC], edge: TLEdge): Unit = { val c_first = edge.first(c.bits, c.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val address = Reg(UInt()) when (c.valid && !c_first) { monAssert (c.bits.opcode === opcode, "'C' channel opcode changed within multibeat operation" + extra) monAssert (c.bits.param === param, "'C' channel param changed within multibeat operation" + extra) monAssert (c.bits.size === size, "'C' channel size changed within multibeat operation" + extra) monAssert (c.bits.source === source, "'C' channel source changed within multibeat operation" + extra) monAssert (c.bits.address=== address,"'C' channel address changed with multibeat operation" + extra) } when (c.fire && c_first) { opcode := c.bits.opcode param := c.bits.param size := c.bits.size source := c.bits.source address := c.bits.address } } def legalizeMultibeatD(d: DecoupledIO[TLBundleD], edge: TLEdge): Unit = { val d_first = edge.first(d.bits, d.fire) val opcode = Reg(UInt()) val param = Reg(UInt()) val size = Reg(UInt()) val source = Reg(UInt()) val sink = Reg(UInt()) val denied = Reg(Bool()) when (d.valid && !d_first) { assume (d.bits.opcode === opcode, "'D' channel opcode changed within multibeat operation" + extra) assume (d.bits.param === param, "'D' channel param changed within multibeat operation" + extra) assume (d.bits.size === size, "'D' channel size changed within multibeat operation" + extra) assume (d.bits.source === source, "'D' channel source changed within multibeat operation" + extra) assume (d.bits.sink === sink, "'D' channel sink changed with multibeat operation" + extra) assume (d.bits.denied === denied, "'D' channel denied changed with multibeat operation" + extra) } when (d.fire && d_first) { opcode := d.bits.opcode param := d.bits.param size := d.bits.size source := d.bits.source sink := d.bits.sink denied := d.bits.denied } } def legalizeMultibeat(bundle: TLBundle, edge: TLEdge): Unit = { legalizeMultibeatA(bundle.a, edge) legalizeMultibeatD(bundle.d, edge) if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { legalizeMultibeatB(bundle.b, edge) legalizeMultibeatC(bundle.c, edge) } } //This is left in for almond which doesn't adhere to the tilelink protocol @deprecated("Use 
legalizeADSource instead if possible","") def legalizeADSourceOld(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.client.endSourceId.W)) val a_first = edge.first(bundle.a.bits, bundle.a.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val a_set = WireInit(0.U(edge.client.endSourceId.W)) when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) assert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) assume((a_set | inflight)(bundle.d.bits.source), "'D' channel acknowledged for nothing inflight" + extra) } if (edge.manager.minLatency > 0) { assume(a_set =/= d_clr || !a_set.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") assert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeADSource(bundle: TLBundle, edge: TLEdge): Unit = { val a_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val a_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_a_opcode_bus_size = log2Ceil(a_opcode_bus_size) val log_a_size_bus_size = log2Ceil(a_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) // size up to avoid width error inflight.suggestName("inflight") val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) inflight_opcodes.suggestName("inflight_opcodes") val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) inflight_sizes.suggestName("inflight_sizes") val a_first = edge.first(bundle.a.bits, bundle.a.fire) a_first.suggestName("a_first") val d_first = edge.first(bundle.d.bits, bundle.d.fire) d_first.suggestName("d_first") val a_set = WireInit(0.U(edge.client.endSourceId.W)) val a_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) a_set.suggestName("a_set") a_set_wo_ready.suggestName("a_set_wo_ready") val a_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) a_opcodes_set.suggestName("a_opcodes_set") val a_sizes_set = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) a_sizes_set.suggestName("a_sizes_set") val a_opcode_lookup = WireInit(0.U((a_opcode_bus_size - 1).W)) a_opcode_lookup.suggestName("a_opcode_lookup") a_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_a_opcode_bus_size.U) & size_to_numfullbits(1.U << log_a_opcode_bus_size.U)) >> 1.U val a_size_lookup = WireInit(0.U((1 << log_a_size_bus_size).W)) a_size_lookup.suggestName("a_size_lookup") a_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_a_size_bus_size.U) & size_to_numfullbits(1.U << log_a_size_bus_size.U)) >> 1.U val responseMap = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, 
TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.Grant, TLMessages.Grant)) val responseMapSecondOption = VecInit(Seq(TLMessages.AccessAck, TLMessages.AccessAck, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.AccessAckData, TLMessages.HintAck, TLMessages.GrantData, TLMessages.Grant)) val a_opcodes_set_interm = WireInit(0.U(a_opcode_bus_size.W)) a_opcodes_set_interm.suggestName("a_opcodes_set_interm") val a_sizes_set_interm = WireInit(0.U(a_size_bus_size.W)) a_sizes_set_interm.suggestName("a_sizes_set_interm") when (bundle.a.valid && a_first && edge.isRequest(bundle.a.bits)) { a_set_wo_ready := UIntToOH(bundle.a.bits.source) } when (bundle.a.fire && a_first && edge.isRequest(bundle.a.bits)) { a_set := UIntToOH(bundle.a.bits.source) a_opcodes_set_interm := (bundle.a.bits.opcode << 1.U) | 1.U a_sizes_set_interm := (bundle.a.bits.size << 1.U) | 1.U a_opcodes_set := (a_opcodes_set_interm) << (bundle.a.bits.source << log_a_opcode_bus_size.U) a_sizes_set := (a_sizes_set_interm) << (bundle.a.bits.source << log_a_size_bus_size.U) monAssert(!inflight(bundle.a.bits.source), "'A' channel re-used a source ID" + extra) } val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") val d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_a_opcode_bus_size).W)) d_opcodes_clr.suggestName("d_opcodes_clr") val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_a_size_bus_size).W)) d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_a_opcode_bus_size.U) << (bundle.d.bits.source << log_a_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_a_size_bus_size.U) << (bundle.d.bits.source << log_a_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && !d_release_ack) { val same_cycle_resp = bundle.a.valid && a_first && edge.isRequest(bundle.a.bits) && (bundle.a.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.opcode === responseMap(bundle.a.bits.opcode)) || (bundle.d.bits.opcode === responseMapSecondOption(bundle.a.bits.opcode)), "'D' channel contains improper opcode response" + extra) assume((bundle.a.bits.size === bundle.d.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.opcode === responseMap(a_opcode_lookup)) || (bundle.d.bits.opcode === responseMapSecondOption(a_opcode_lookup)), "'D' channel contains improper opcode response" + extra) assume((bundle.d.bits.size === a_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && a_first && bundle.a.valid && (bundle.a.bits.source === bundle.d.bits.source) && !d_release_ack) { assume((!bundle.d.ready) || bundle.a.ready, "ready check") } if (edge.manager.minLatency > 0) { assume(a_set_wo_ready =/= d_clr_wo_ready || !a_set_wo_ready.orR, s"'A' and 'D' concurrent, despite minlatency > 0" + extra) } inflight := (inflight | a_set) & ~d_clr 
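    // Note on the bookkeeping encoding used above: each source ID's slot in
    // inflight_opcodes / inflight_sizes holds (value << 1) | 1, so an all-zero slot
    // means "no request outstanding", and the a_opcode_lookup / a_size_lookup
    // expressions shift that marker LSB back out before checking the 'D' channel response.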
inflight_opcodes := (inflight_opcodes | a_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | a_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.a.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeCDSource(bundle: TLBundle, edge: TLEdge): Unit = { val c_size_bus_size = edge.bundle.sizeBits + 1 //add one so that 0 is not mapped to anything (size 0 -> size 1 in map, size 0 in map means unset) val c_opcode_bus_size = 3 + 1 //opcode size is 3, but add so that 0 is not mapped to anything val log_c_opcode_bus_size = log2Ceil(c_opcode_bus_size) val log_c_size_bus_size = log2Ceil(c_size_bus_size) def size_to_numfullbits(x: UInt): UInt = (1.U << x) - 1.U //convert a number to that many full bits val inflight = RegInit(0.U((2 max edge.client.endSourceId).W)) val inflight_opcodes = RegInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val inflight_sizes = RegInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) inflight.suggestName("inflight") inflight_opcodes.suggestName("inflight_opcodes") inflight_sizes.suggestName("inflight_sizes") val c_first = edge.first(bundle.c.bits, bundle.c.fire) val d_first = edge.first(bundle.d.bits, bundle.d.fire) c_first.suggestName("c_first") d_first.suggestName("d_first") val c_set = WireInit(0.U(edge.client.endSourceId.W)) val c_set_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val c_opcodes_set = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val c_sizes_set = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) c_set.suggestName("c_set") c_set_wo_ready.suggestName("c_set_wo_ready") c_opcodes_set.suggestName("c_opcodes_set") c_sizes_set.suggestName("c_sizes_set") val c_opcode_lookup = WireInit(0.U((1 << log_c_opcode_bus_size).W)) val c_size_lookup = WireInit(0.U((1 << log_c_size_bus_size).W)) c_opcode_lookup := ((inflight_opcodes) >> (bundle.d.bits.source << log_c_opcode_bus_size.U) & size_to_numfullbits(1.U << log_c_opcode_bus_size.U)) >> 1.U c_size_lookup := ((inflight_sizes) >> (bundle.d.bits.source << log_c_size_bus_size.U) & size_to_numfullbits(1.U << log_c_size_bus_size.U)) >> 1.U c_opcode_lookup.suggestName("c_opcode_lookup") c_size_lookup.suggestName("c_size_lookup") val c_opcodes_set_interm = WireInit(0.U(c_opcode_bus_size.W)) val c_sizes_set_interm = WireInit(0.U(c_size_bus_size.W)) c_opcodes_set_interm.suggestName("c_opcodes_set_interm") c_sizes_set_interm.suggestName("c_sizes_set_interm") when (bundle.c.valid && c_first && edge.isRequest(bundle.c.bits)) { c_set_wo_ready := UIntToOH(bundle.c.bits.source) } when (bundle.c.fire && c_first && edge.isRequest(bundle.c.bits)) { c_set := UIntToOH(bundle.c.bits.source) c_opcodes_set_interm := (bundle.c.bits.opcode << 1.U) | 1.U c_sizes_set_interm := (bundle.c.bits.size << 1.U) | 1.U c_opcodes_set := (c_opcodes_set_interm) << (bundle.c.bits.source << log_c_opcode_bus_size.U) c_sizes_set := (c_sizes_set_interm) << (bundle.c.bits.source << log_c_size_bus_size.U) monAssert(!inflight(bundle.c.bits.source), "'C' channel re-used a source ID" + extra) } val c_probe_ack = bundle.c.bits.opcode === TLMessages.ProbeAck || bundle.c.bits.opcode === TLMessages.ProbeAckData val d_clr = WireInit(0.U(edge.client.endSourceId.W)) val d_clr_wo_ready = WireInit(0.U(edge.client.endSourceId.W)) val 
d_opcodes_clr = WireInit(0.U((edge.client.endSourceId << log_c_opcode_bus_size).W)) val d_sizes_clr = WireInit(0.U((edge.client.endSourceId << log_c_size_bus_size).W)) d_clr.suggestName("d_clr") d_clr_wo_ready.suggestName("d_clr_wo_ready") d_opcodes_clr.suggestName("d_opcodes_clr") d_sizes_clr.suggestName("d_sizes_clr") val d_release_ack = bundle.d.bits.opcode === TLMessages.ReleaseAck when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr_wo_ready := UIntToOH(bundle.d.bits.source) } when (bundle.d.fire && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { d_clr := UIntToOH(bundle.d.bits.source) d_opcodes_clr := size_to_numfullbits(1.U << log_c_opcode_bus_size.U) << (bundle.d.bits.source << log_c_opcode_bus_size.U) d_sizes_clr := size_to_numfullbits(1.U << log_c_size_bus_size.U) << (bundle.d.bits.source << log_c_size_bus_size.U) } when (bundle.d.valid && d_first && edge.isResponse(bundle.d.bits) && d_release_ack) { val same_cycle_resp = bundle.c.valid && c_first && edge.isRequest(bundle.c.bits) && (bundle.c.bits.source === bundle.d.bits.source) assume(((inflight)(bundle.d.bits.source)) || same_cycle_resp, "'D' channel acknowledged for nothing inflight" + extra) when (same_cycle_resp) { assume((bundle.d.bits.size === bundle.c.bits.size), "'D' channel contains improper response size" + extra) } .otherwise { assume((bundle.d.bits.size === c_size_lookup), "'D' channel contains improper response size" + extra) } } when(bundle.d.valid && d_first && c_first && bundle.c.valid && (bundle.c.bits.source === bundle.d.bits.source) && d_release_ack && !c_probe_ack) { assume((!bundle.d.ready) || bundle.c.ready, "ready check") } if (edge.manager.minLatency > 0) { when (c_set_wo_ready.orR) { assume(c_set_wo_ready =/= d_clr_wo_ready, s"'C' and 'D' concurrent, despite minlatency > 0" + extra) } } inflight := (inflight | c_set) & ~d_clr inflight_opcodes := (inflight_opcodes | c_opcodes_set) & ~d_opcodes_clr inflight_sizes := (inflight_sizes | c_sizes_set) & ~d_sizes_clr val watchdog = RegInit(0.U(32.W)) val limit = PlusArg("tilelink_timeout", docstring="Kill emulation after INT waiting TileLink cycles. 
Off if 0.") monAssert (!inflight.orR || limit === 0.U || watchdog < limit, "TileLink timeout expired" + extra) watchdog := watchdog + 1.U when (bundle.c.fire || bundle.d.fire) { watchdog := 0.U } } def legalizeDESink(bundle: TLBundle, edge: TLEdge): Unit = { val inflight = RegInit(0.U(edge.manager.endSinkId.W)) val d_first = edge.first(bundle.d.bits, bundle.d.fire) val e_first = true.B val d_set = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.d.fire && d_first && edge.isRequest(bundle.d.bits)) { d_set := UIntToOH(bundle.d.bits.sink) assume(!inflight(bundle.d.bits.sink), "'D' channel re-used a sink ID" + extra) } val e_clr = WireInit(0.U(edge.manager.endSinkId.W)) when (bundle.e.fire && e_first && edge.isResponse(bundle.e.bits)) { e_clr := UIntToOH(bundle.e.bits.sink) monAssert((d_set | inflight)(bundle.e.bits.sink), "'E' channel acknowledged for nothing inflight" + extra) } // edge.client.minLatency applies to BC, not DE inflight := (inflight | d_set) & ~e_clr } def legalizeUnique(bundle: TLBundle, edge: TLEdge): Unit = { val sourceBits = log2Ceil(edge.client.endSourceId) val tooBig = 14 // >16kB worth of flight information gets to be too much if (sourceBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with source bits (${sourceBits}) > ${tooBig}; A=>D transaction flight will not be checked") } else { if (args.edge.params(TestplanTestType).simulation) { if (args.edge.params(TLMonitorStrictMode)) { legalizeADSource(bundle, edge) legalizeCDSource(bundle, edge) } else { legalizeADSourceOld(bundle, edge) } } if (args.edge.params(TestplanTestType).formal) { legalizeADSourceFormal(bundle, edge) } } if (edge.client.anySupportProbe && edge.manager.anySupportAcquireB) { // legalizeBCSourceAddress(bundle, edge) // too much state needed to synthesize... val sinkBits = log2Ceil(edge.manager.endSinkId) if (sinkBits > tooBig) { println(s"WARNING: TLMonitor instantiated on a bus with sink bits (${sinkBits}) > ${tooBig}; D=>E transaction flight will not be checked") } else { legalizeDESink(bundle, edge) } } } def legalize(bundle: TLBundle, edge: TLEdge, reset: Reset): Unit = { legalizeFormat (bundle, edge) legalizeMultibeat (bundle, edge) legalizeUnique (bundle, edge) } } File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! 
If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << 
log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File PlusArg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.experimental._ import chisel3.util.HasBlackBoxResource @deprecated("This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05") case class PlusArgInfo(default: BigInt, docstring: String) /** Case class for PlusArg information * * @tparam A scala type of the PlusArg value * @param default optional default value * @param docstring text to include in the help * @param doctype description of the Verilog type of the PlusArg value (e.g. 
STRING, INT) */ private case class PlusArgContainer[A](default: Option[A], docstring: String, doctype: String) /** Typeclass for converting a type to a doctype string * @tparam A some type */ trait Doctypeable[A] { /** Return the doctype string for some option */ def toDoctype(a: Option[A]): String } /** Object containing implementations of the Doctypeable typeclass */ object Doctypes { /** Converts an Int => "INT" */ implicit val intToDoctype = new Doctypeable[Int] { def toDoctype(a: Option[Int]) = "INT" } /** Converts a BigInt => "INT" */ implicit val bigIntToDoctype = new Doctypeable[BigInt] { def toDoctype(a: Option[BigInt]) = "INT" } /** Converts a String => "STRING" */ implicit val stringToDoctype = new Doctypeable[String] { def toDoctype(a: Option[String]) = "STRING" } } class plusarg_reader(val format: String, val default: BigInt, val docstring: String, val width: Int) extends BlackBox(Map( "FORMAT" -> StringParam(format), "DEFAULT" -> IntParam(default), "WIDTH" -> IntParam(width) )) with HasBlackBoxResource { val io = IO(new Bundle { val out = Output(UInt(width.W)) }) addResource("/vsrc/plusarg_reader.v") } /* This wrapper class has no outputs, making it clear it is a simulation-only construct */ class PlusArgTimeout(val format: String, val default: BigInt, val docstring: String, val width: Int) extends Module { val io = IO(new Bundle { val count = Input(UInt(width.W)) }) val max = Module(new plusarg_reader(format, default, docstring, width)).io.out when (max > 0.U) { assert (io.count < max, s"Timeout exceeded: $docstring") } } import Doctypes._ object PlusArg { /** PlusArg("foo") will return 42.U if the simulation is run with +foo=42 * Do not use this as an initial register value. The value is set in an * initial block and thus accessing it from another initial is racey. * Add a docstring to document the arg, which can be dumped in an elaboration * pass. */ def apply(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32): UInt = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new plusarg_reader(name + "=%d", default, docstring, width)).io.out } /** PlusArg.timeout(name, default, docstring)(count) will use chisel.assert * to kill the simulation when count exceeds the specified integer argument. * Default 0 will never assert. */ def timeout(name: String, default: BigInt = 0, docstring: String = "", width: Int = 32)(count: UInt): Unit = { PlusArgArtefacts.append(name, Some(default), docstring) Module(new PlusArgTimeout(name + "=%d", default, docstring, width)).io.count := count } } object PlusArgArtefacts { private var artefacts: Map[String, PlusArgContainer[_]] = Map.empty /* Add a new PlusArg */ @deprecated( "Use `Some(BigInt)` to specify a `default` value. 
This will be removed in Rocket Chip 2020.08", "Rocket Chip 2020.05" ) def append(name: String, default: BigInt, docstring: String): Unit = append(name, Some(default), docstring) /** Add a new PlusArg * * @tparam A scala type of the PlusArg value * @param name name for the PlusArg * @param default optional default value * @param docstring text to include in the help */ def append[A : Doctypeable](name: String, default: Option[A], docstring: String): Unit = artefacts = artefacts ++ Map(name -> PlusArgContainer(default, docstring, implicitly[Doctypeable[A]].toDoctype(default))) /* From plus args, generate help text */ private def serializeHelp_cHeader(tab: String = ""): String = artefacts .map{ case(arg, info) => s"""|$tab+$arg=${info.doctype}\\n\\ |$tab${" "*20}${info.docstring}\\n\\ |""".stripMargin ++ info.default.map{ case default => s"$tab${" "*22}(default=${default})\\n\\\n"}.getOrElse("") }.toSeq.mkString("\\n\\\n") ++ "\"" /* From plus args, generate a char array of their names */ private def serializeArray_cHeader(tab: String = ""): String = { val prettyTab = tab + " " * 44 // Length of 'static const ...' s"${tab}static const char * verilog_plusargs [] = {\\\n" ++ artefacts .map{ case(arg, _) => s"""$prettyTab"$arg",\\\n""" } .mkString("")++ s"${prettyTab}0};" } /* Generate C code to be included in emulator.cc that helps with * argument parsing based on available Verilog PlusArgs */ def serialize_cHeader(): String = s"""|#define PLUSARG_USAGE_OPTIONS \"EMULATOR VERILOG PLUSARGS\\n\\ |${serializeHelp_cHeader(" "*7)} |${serializeArray_cHeader()} |""".stripMargin } File package.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip import chisel3._ import chisel3.util._ import scala.math.min import scala.collection.{immutable, mutable} package object util { implicit class UnzippableOption[S, T](val x: Option[(S, T)]) { def unzip = (x.map(_._1), x.map(_._2)) } implicit class UIntIsOneOf(private val x: UInt) extends AnyVal { def isOneOf(s: Seq[UInt]): Bool = s.map(x === _).orR def isOneOf(u1: UInt, u2: UInt*): Bool = isOneOf(u1 +: u2.toSeq) } implicit class VecToAugmentedVec[T <: Data](private val x: Vec[T]) extends AnyVal { /** Like Vec.apply(idx), but tolerates indices of mismatched width */ def extract(idx: UInt): T = x((idx | 0.U(log2Ceil(x.size).W)).extract(log2Ceil(x.size) - 1, 0)) } implicit class SeqToAugmentedSeq[T <: Data](private val x: Seq[T]) extends AnyVal { def apply(idx: UInt): T = { if (x.size <= 1) { x.head } else if (!isPow2(x.size)) { // For non-power-of-2 seqs, reflect elements to simplify decoder (x ++ x.takeRight(x.size & -x.size)).toSeq(idx) } else { // Ignore MSBs of idx val truncIdx = if (idx.isWidthKnown && idx.getWidth <= log2Ceil(x.size)) idx else (idx | 0.U(log2Ceil(x.size).W))(log2Ceil(x.size)-1, 0) x.zipWithIndex.tail.foldLeft(x.head) { case (prev, (cur, i)) => Mux(truncIdx === i.U, cur, prev) } } } def extract(idx: UInt): T = VecInit(x).extract(idx) def asUInt: UInt = Cat(x.map(_.asUInt).reverse) def rotate(n: Int): Seq[T] = x.drop(n) ++ x.take(n) def rotate(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => (r.rotate(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } def rotateRight(n: Int): Seq[T] = x.takeRight(n) ++ x.dropRight(n) def rotateRight(n: UInt): Seq[T] = { if (x.size <= 1) { x } else { require(isPow2(x.size)) val amt = n.padTo(log2Ceil(x.size)) (0 until log2Ceil(x.size)).foldLeft(x)((r, i) => 
(r.rotateRight(1 << i) zip r).map { case (s, a) => Mux(amt(i), s, a) }) } } } // allow bitwise ops on Seq[Bool] just like UInt implicit class SeqBoolBitwiseOps(private val x: Seq[Bool]) extends AnyVal { def & (y: Seq[Bool]): Seq[Bool] = (x zip y).map { case (a, b) => a && b } def | (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a || b } def ^ (y: Seq[Bool]): Seq[Bool] = padZip(x, y).map { case (a, b) => a ^ b } def << (n: Int): Seq[Bool] = Seq.fill(n)(false.B) ++ x def >> (n: Int): Seq[Bool] = x drop n def unary_~ : Seq[Bool] = x.map(!_) def andR: Bool = if (x.isEmpty) true.B else x.reduce(_&&_) def orR: Bool = if (x.isEmpty) false.B else x.reduce(_||_) def xorR: Bool = if (x.isEmpty) false.B else x.reduce(_^_) private def padZip(y: Seq[Bool], z: Seq[Bool]): Seq[(Bool, Bool)] = y.padTo(z.size, false.B) zip z.padTo(y.size, false.B) } implicit class DataToAugmentedData[T <: Data](private val x: T) extends AnyVal { def holdUnless(enable: Bool): T = Mux(enable, x, RegEnable(x, enable)) def getElements: Seq[Element] = x match { case e: Element => Seq(e) case a: Aggregate => a.getElements.flatMap(_.getElements) } } /** Any Data subtype that has a Bool member named valid. */ type DataCanBeValid = Data { val valid: Bool } implicit class SeqMemToAugmentedSeqMem[T <: Data](private val x: SyncReadMem[T]) extends AnyVal { def readAndHold(addr: UInt, enable: Bool): T = x.read(addr, enable) holdUnless RegNext(enable) } implicit class StringToAugmentedString(private val x: String) extends AnyVal { /** converts from camel case to underscores, also removing all spaces */ def underscore: String = x.tail.foldLeft(x.headOption.map(_.toLower + "") getOrElse "") { case (acc, c) if c.isUpper => acc + "_" + c.toLower case (acc, c) if c == ' ' => acc case (acc, c) => acc + c } /** converts spaces or underscores to hyphens, also lowering case */ def kebab: String = x.toLowerCase map { case ' ' => '-' case '_' => '-' case c => c } def named(name: Option[String]): String = { x + name.map("_named_" + _ ).getOrElse("_with_no_name") } def named(name: String): String = named(Some(name)) } implicit def uintToBitPat(x: UInt): BitPat = BitPat(x) implicit def wcToUInt(c: WideCounter): UInt = c.value implicit class UIntToAugmentedUInt(private val x: UInt) extends AnyVal { def sextTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(Fill(n - x.getWidth, x(x.getWidth-1)), x) } def padTo(n: Int): UInt = { require(x.getWidth <= n) if (x.getWidth == n) x else Cat(0.U((n - x.getWidth).W), x) } // shifts left by n if n >= 0, or right by -n if n < 0 def << (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << n(w-1, 0) Mux(n(w), shifted >> (1 << w), shifted) } // shifts right by n if n >= 0, or left by -n if n < 0 def >> (n: SInt): UInt = { val w = n.getWidth - 1 require(w <= 30) val shifted = x << (1 << w) >> n(w-1, 0) Mux(n(w), shifted, shifted >> (1 << w)) } // Like UInt.apply(hi, lo), but returns 0.U for zero-width extracts def extract(hi: Int, lo: Int): UInt = { require(hi >= lo-1) if (hi == lo-1) 0.U else x(hi, lo) } // Like Some(UInt.apply(hi, lo)), but returns None for zero-width extracts def extractOption(hi: Int, lo: Int): Option[UInt] = { require(hi >= lo-1) if (hi == lo-1) None else Some(x(hi, lo)) } // like x & ~y, but first truncate or zero-extend y to x's width def andNot(y: UInt): UInt = x & ~(y | (x & 0.U)) def rotateRight(n: Int): UInt = if (n == 0) x else Cat(x(n-1, 0), x >> n) def rotateRight(n: UInt): UInt = { if (x.getWidth <= 1) { x } else {
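// Rotate by a dynamic amount by decomposing n into its bits: for each set bit i, conditionally rotate by 2^i.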
val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateRight(1 << i), r)) } } def rotateLeft(n: Int): UInt = if (n == 0) x else Cat(x(x.getWidth-1-n,0), x(x.getWidth-1,x.getWidth-n)) def rotateLeft(n: UInt): UInt = { if (x.getWidth <= 1) { x } else { val amt = n.padTo(log2Ceil(x.getWidth)) (0 until log2Ceil(x.getWidth)).foldLeft(x)((r, i) => Mux(amt(i), r.rotateLeft(1 << i), r)) } } // compute (this + y) % n, given (this < n) and (y < n) def addWrap(y: UInt, n: Int): UInt = { val z = x +& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z >= n.U, z - n.U, z)(log2Ceil(n)-1, 0) } // compute (this - y) % n, given (this < n) and (y < n) def subWrap(y: UInt, n: Int): UInt = { val z = x -& y if (isPow2(n)) z(n.log2-1, 0) else Mux(z(z.getWidth-1), z + n.U, z)(log2Ceil(n)-1, 0) } def grouped(width: Int): Seq[UInt] = (0 until x.getWidth by width).map(base => x(base + width - 1, base)) def inRange(base: UInt, bounds: UInt) = x >= base && x < bounds def ## (y: Option[UInt]): UInt = y.map(x ## _).getOrElse(x) // Like >=, but prevents x-prop for ('x >= 0) def >== (y: UInt): Bool = x >= y || y === 0.U } implicit class OptionUIntToAugmentedOptionUInt(private val x: Option[UInt]) extends AnyVal { def ## (y: UInt): UInt = x.map(_ ## y).getOrElse(y) def ## (y: Option[UInt]): Option[UInt] = x.map(_ ## y) } implicit class BooleanToAugmentedBoolean(private val x: Boolean) extends AnyVal { def toInt: Int = if (x) 1 else 0 // this one's snagged from scalaz def option[T](z: => T): Option[T] = if (x) Some(z) else None } implicit class IntToAugmentedInt(private val x: Int) extends AnyVal { // exact log2 def log2: Int = { require(isPow2(x)) log2Ceil(x) } } def OH1ToOH(x: UInt): UInt = (x << 1 | 1.U) & ~Cat(0.U(1.W), x) def OH1ToUInt(x: UInt): UInt = OHToUInt(OH1ToOH(x)) def UIntToOH1(x: UInt, width: Int): UInt = ~((-1).S(width.W).asUInt << x)(width-1, 0) def UIntToOH1(x: UInt): UInt = UIntToOH1(x, (1 << x.getWidth) - 1) def trailingZeros(x: Int): Option[Int] = if (x > 0) Some(log2Ceil(x & -x)) else None // Fill 1s from low bits to high bits def leftOR(x: UInt): UInt = leftOR(x, x.getWidth, x.getWidth) def leftOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x << s)(width-1,0)) helper(1, x)(width-1, 0) } // Fill 1s from high bits to low bits def rightOR(x: UInt): UInt = rightOR(x, x.getWidth, x.getWidth) def rightOR(x: UInt, width: Integer, cap: Integer = 999999): UInt = { val stop = min(width, cap) def helper(s: Int, x: UInt): UInt = if (s >= stop) x else helper(s+s, x | (x >> s)) helper(1, x)(width-1, 0) } def OptimizationBarrier[T <: Data](in: T): T = { val barrier = Module(new Module { val io = IO(new Bundle { val x = Input(chiselTypeOf(in)) val y = Output(chiselTypeOf(in)) }) io.y := io.x override def desiredName = s"OptimizationBarrier_${in.typeName}" }) barrier.io.x := in barrier.io.y } /** Similar to Seq.groupBy except this returns a Seq instead of a Map * Useful for deterministic code generation */ def groupByIntoSeq[A, K](xs: Seq[A])(f: A => K): immutable.Seq[(K, immutable.Seq[A])] = { val map = mutable.LinkedHashMap.empty[K, mutable.ListBuffer[A]] for (x <- xs) { val key = f(x) val l = map.getOrElseUpdate(key, mutable.ListBuffer.empty[A]) l += x } map.view.map({ case (k, vs) => k -> vs.toList }).toList } def heterogeneousOrGlobalSetting[T](in: Seq[T], n: Int): Seq[T] = in.size match { case 1 => List.fill(n)(in.head) case x if x == n => in case _
=> throw new Exception(s"must provide exactly 1 or $n of some field, but got:\n$in") } // HeterogeneousBag moved to standalone diplomacy @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") def HeterogeneousBag[T <: Data](elts: Seq[T]) = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag[T](elts) @deprecated("HeterogeneousBag has been absorbed into standalone diplomacy library", "rocketchip 2.0.0") val HeterogeneousBag = _root_.org.chipsalliance.diplomacy.nodes.HeterogeneousBag } File Bundles.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import freechips.rocketchip.util._ import scala.collection.immutable.ListMap import chisel3.util.Decoupled import chisel3.util.DecoupledIO import chisel3.reflect.DataMirror abstract class TLBundleBase(val params: TLBundleParameters) extends Bundle // common combos in lazy policy: // Put + Acquire // Release + AccessAck object TLMessages { // A B C D E def PutFullData = 0.U // . . => AccessAck def PutPartialData = 1.U // . . => AccessAck def ArithmeticData = 2.U // . . => AccessAckData def LogicalData = 3.U // . . => AccessAckData def Get = 4.U // . . => AccessAckData def Hint = 5.U // . . => HintAck def AcquireBlock = 6.U // . => Grant[Data] def AcquirePerm = 7.U // . => Grant[Data] def Probe = 6.U // . => ProbeAck[Data] def AccessAck = 0.U // . . def AccessAckData = 1.U // . . def HintAck = 2.U // . . def ProbeAck = 4.U // . def ProbeAckData = 5.U // . def Release = 6.U // . => ReleaseAck def ReleaseData = 7.U // . => ReleaseAck def Grant = 4.U // . => GrantAck def GrantData = 5.U // . => GrantAck def ReleaseAck = 6.U // . def GrantAck = 0.U // . def isA(x: UInt) = x <= AcquirePerm def isB(x: UInt) = x <= Probe def isC(x: UInt) = x <= ReleaseData def isD(x: UInt) = x <= ReleaseAck def adResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, Grant, Grant) def bcResponse = VecInit(AccessAck, AccessAck, AccessAckData, AccessAckData, AccessAckData, HintAck, ProbeAck, ProbeAck) def a = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("AcquireBlock",TLPermissions.PermMsgGrow), ("AcquirePerm",TLPermissions.PermMsgGrow)) def b = Seq( ("PutFullData",TLPermissions.PermMsgReserved), ("PutPartialData",TLPermissions.PermMsgReserved), ("ArithmeticData",TLAtomics.ArithMsg), ("LogicalData",TLAtomics.LogicMsg), ("Get",TLPermissions.PermMsgReserved), ("Hint",TLHints.HintsMsg), ("Probe",TLPermissions.PermMsgCap)) def c = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("ProbeAck",TLPermissions.PermMsgReport), ("ProbeAckData",TLPermissions.PermMsgReport), ("Release",TLPermissions.PermMsgReport), ("ReleaseData",TLPermissions.PermMsgReport)) def d = Seq( ("AccessAck",TLPermissions.PermMsgReserved), ("AccessAckData",TLPermissions.PermMsgReserved), ("HintAck",TLPermissions.PermMsgReserved), ("Invalid Opcode",TLPermissions.PermMsgReserved), ("Grant",TLPermissions.PermMsgCap), ("GrantData",TLPermissions.PermMsgCap), ("ReleaseAck",TLPermissions.PermMsgReserved)) } /** * The three primary TileLink permissions are: * (T)runk: the agent is (or is on inwards path to) the global point of
serialization. * (B)ranch: the agent is on an outwards path to * (N)one: * These permissions are permuted by transfer operations in various ways. * Operations can cap permissions, request for them to be grown or shrunk, * or for a report on their current status. */ object TLPermissions { val aWidth = 2 val bdWidth = 2 val cWidth = 3 // Cap types (Grant = new permissions, Probe = permissions <= target) def toT = 0.U(bdWidth.W) def toB = 1.U(bdWidth.W) def toN = 2.U(bdWidth.W) def isCap(x: UInt) = x <= toN // Grow types (Acquire = permissions >= target) def NtoB = 0.U(aWidth.W) def NtoT = 1.U(aWidth.W) def BtoT = 2.U(aWidth.W) def isGrow(x: UInt) = x <= BtoT // Shrink types (ProbeAck, Release) def TtoB = 0.U(cWidth.W) def TtoN = 1.U(cWidth.W) def BtoN = 2.U(cWidth.W) def isShrink(x: UInt) = x <= BtoN // Report types (ProbeAck, Release) def TtoT = 3.U(cWidth.W) def BtoB = 4.U(cWidth.W) def NtoN = 5.U(cWidth.W) def isReport(x: UInt) = x <= NtoN def PermMsgGrow:Seq[String] = Seq("Grow NtoB", "Grow NtoT", "Grow BtoT") def PermMsgCap:Seq[String] = Seq("Cap toT", "Cap toB", "Cap toN") def PermMsgReport:Seq[String] = Seq("Shrink TtoB", "Shrink TtoN", "Shrink BtoN", "Report TtoT", "Report BtoB", "Report NtoN") def PermMsgReserved:Seq[String] = Seq("Reserved") } object TLAtomics { val width = 3 // Arithmetic types def MIN = 0.U(width.W) def MAX = 1.U(width.W) def MINU = 2.U(width.W) def MAXU = 3.U(width.W) def ADD = 4.U(width.W) def isArithmetic(x: UInt) = x <= ADD // Logical types def XOR = 0.U(width.W) def OR = 1.U(width.W) def AND = 2.U(width.W) def SWAP = 3.U(width.W) def isLogical(x: UInt) = x <= SWAP def ArithMsg:Seq[String] = Seq("MIN", "MAX", "MINU", "MAXU", "ADD") def LogicMsg:Seq[String] = Seq("XOR", "OR", "AND", "SWAP") } object TLHints { val width = 1 def PREFETCH_READ = 0.U(width.W) def PREFETCH_WRITE = 1.U(width.W) def isHints(x: UInt) = x <= PREFETCH_WRITE def HintsMsg:Seq[String] = Seq("PrefetchRead", "PrefetchWrite") } sealed trait TLChannel extends TLBundleBase { val channelName: String } sealed trait TLDataChannel extends TLChannel sealed trait TLAddrChannel extends TLDataChannel final class TLBundleA(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleA_${params.shortName}" val channelName = "'A' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(List(TLAtomics.width, TLPermissions.aWidth, TLHints.width).max.W) // amo_opcode || grow perms || hint val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleB(params: TLBundleParameters) extends TLBundleBase(params) with TLAddrChannel { override def typeName = s"TLBundleB_${params.shortName}" val channelName = "'B' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val address = UInt(params.addressBits.W) // from // variable fields during multibeat: val mask = UInt((params.dataBits/8).W) val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleC(params: TLBundleParameters) extends TLBundleBase(params) with
TLAddrChannel { override def typeName = s"TLBundleC_${params.shortName}" val channelName = "'C' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.cWidth.W) // shrink or report perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // from val address = UInt(params.addressBits.W) // to val user = BundleMap(params.requestFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleD(params: TLBundleParameters) extends TLBundleBase(params) with TLDataChannel { override def typeName = s"TLBundleD_${params.shortName}" val channelName = "'D' channel" // fixed fields during multibeat: val opcode = UInt(3.W) val param = UInt(TLPermissions.bdWidth.W) // cap perms val size = UInt(params.sizeBits.W) val source = UInt(params.sourceBits.W) // to val sink = UInt(params.sinkBits.W) // from val denied = Bool() // implies corrupt iff *Data val user = BundleMap(params.responseFields) val echo = BundleMap(params.echoFields) // variable fields during multibeat: val data = UInt(params.dataBits.W) val corrupt = Bool() // only applies to *Data messages } final class TLBundleE(params: TLBundleParameters) extends TLBundleBase(params) with TLChannel { override def typeName = s"TLBundleE_${params.shortName}" val channelName = "'E' channel" val sink = UInt(params.sinkBits.W) // to } class TLBundle(val params: TLBundleParameters) extends Record { // Emulate a Bundle with elements abcde or ad depending on params.hasBCE private val optA = Some (Decoupled(new TLBundleA(params))) private val optB = params.hasBCE.option(Flipped(Decoupled(new TLBundleB(params)))) private val optC = params.hasBCE.option(Decoupled(new TLBundleC(params))) private val optD = Some (Flipped(Decoupled(new TLBundleD(params)))) private val optE = params.hasBCE.option(Decoupled(new TLBundleE(params))) def a: DecoupledIO[TLBundleA] = optA.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleA(params))))) def b: DecoupledIO[TLBundleB] = optB.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleB(params))))) def c: DecoupledIO[TLBundleC] = optC.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleC(params))))) def d: DecoupledIO[TLBundleD] = optD.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleD(params))))) def e: DecoupledIO[TLBundleE] = optE.getOrElse(WireDefault(0.U.asTypeOf(Decoupled(new TLBundleE(params))))) val elements = if (params.hasBCE) ListMap("e" -> e, "d" -> d, "c" -> c, "b" -> b, "a" -> a) else ListMap("d" -> d, "a" -> a) def tieoff(): Unit = { DataMirror.specifiedDirectionOf(a.ready) match { case SpecifiedDirection.Input => a.ready := false.B c.ready := false.B e.ready := false.B b.valid := false.B d.valid := false.B case SpecifiedDirection.Output => a.valid := false.B c.valid := false.B e.valid := false.B b.ready := false.B d.ready := false.B case _ => } } } object TLBundle { def apply(params: TLBundleParameters) = new TLBundle(params) } class TLAsyncBundleBase(val params: TLAsyncBundleParameters) extends Bundle class TLAsyncBundle(params: TLAsyncBundleParameters) extends TLAsyncBundleBase(params) { val a = new AsyncBundle(new TLBundleA(params.base), params.async) val b = Flipped(new AsyncBundle(new TLBundleB(params.base), params.async)) val c = new AsyncBundle(new TLBundleC(params.base), params.async) val d = Flipped(new AsyncBundle(new TLBundleD(params.base), params.async)) val e = new AsyncBundle(new 
TLBundleE(params.base), params.async) } class TLRationalBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = RationalIO(new TLBundleA(params)) val b = Flipped(RationalIO(new TLBundleB(params))) val c = RationalIO(new TLBundleC(params)) val d = Flipped(RationalIO(new TLBundleD(params))) val e = RationalIO(new TLBundleE(params)) } class TLCreditedBundle(params: TLBundleParameters) extends TLBundleBase(params) { val a = CreditedIO(new TLBundleA(params)) val b = Flipped(CreditedIO(new TLBundleB(params))) val c = CreditedIO(new TLBundleC(params)) val d = Flipped(CreditedIO(new TLBundleD(params))) val e = CreditedIO(new TLBundleE(params)) } File Parameters.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.diplomacy import chisel3._ import chisel3.util.{DecoupledIO, Queue, ReadyValidIO, isPow2, log2Ceil, log2Floor} import freechips.rocketchip.util.ShiftQueue /** Options for describing the attributes of memory regions */ object RegionType { // Define the 'more relaxed than' ordering val cases = Seq(CACHED, TRACKED, UNCACHED, IDEMPOTENT, VOLATILE, PUT_EFFECTS, GET_EFFECTS) sealed trait T extends Ordered[T] { def compare(that: T): Int = cases.indexOf(that) compare cases.indexOf(this) } case object CACHED extends T // an intermediate agent may have cached a copy of the region for you case object TRACKED extends T // the region may have been cached by another master, but coherence is being provided case object UNCACHED extends T // the region has not been cached yet, but should be cached when possible case object IDEMPOTENT extends T // gets return most recently put content, but content should not be cached case object VOLATILE extends T // content may change without a put, but puts and gets have no side effects case object PUT_EFFECTS extends T // puts produce side effects and so must not be combined/delayed case object GET_EFFECTS extends T // gets produce side effects and so must not be issued speculatively } // A non-empty half-open range; [start, end) case class IdRange(start: Int, end: Int) extends Ordered[IdRange] { require (start >= 0, s"Ids cannot be negative, but got: $start.") require (start <= end, "Id ranges cannot be negative.") def compare(x: IdRange) = { val primary = (this.start - x.start).signum val secondary = (x.end - this.end).signum if (primary != 0) primary else secondary } def overlaps(x: IdRange) = start < x.end && x.start < end def contains(x: IdRange) = start <= x.start && x.end <= end def contains(x: Int) = start <= x && x < end def contains(x: UInt) = if (size == 0) { false.B } else if (size == 1) { // simple comparison x === start.U } else { // find index of largest different bit val largestDeltaBit = log2Floor(start ^ (end-1)) val smallestCommonBit = largestDeltaBit + 1 // may not exist in x val uncommonMask = (1 << smallestCommonBit) - 1 val uncommonBits = (x | 0.U(smallestCommonBit.W))(largestDeltaBit, 0) // the prefix must match exactly (note: may shift ALL bits away) (x >> smallestCommonBit) === (start >> smallestCommonBit).U && // firrtl constant prop range analysis can eliminate these two: (start & uncommonMask).U <= uncommonBits && uncommonBits <= ((end-1) & uncommonMask).U } def shift(x: Int) = IdRange(start+x, end+x) def size = end - start def isEmpty = end == start def range = start until end } object IdRange { def overlaps(s: Seq[IdRange]) = if (s.isEmpty) None else { val ranges = s.sorted (ranges.tail zip ranges.init) find { case (a, b) => a overlaps b } } } // A potentially empty inclusive range of 2-powers
[min, max] (in bytes) case class TransferSizes(min: Int, max: Int) { def this(x: Int) = this(x, x) require (min <= max, s"Min transfer $min > max transfer $max") require (min >= 0 && max >= 0, s"TransferSizes must be positive, got: ($min, $max)") require (max == 0 || isPow2(max), s"TransferSizes must be a power of 2, got: $max") require (min == 0 || isPow2(min), s"TransferSizes must be a power of 2, got: $min") require (max == 0 || min != 0, s"TransferSize 0 is forbidden unless (0,0), got: ($min, $max)") def none = min == 0 def contains(x: Int) = isPow2(x) && min <= x && x <= max def containsLg(x: Int) = contains(1 << x) def containsLg(x: UInt) = if (none) false.B else if (min == max) { log2Ceil(min).U === x } else { log2Ceil(min).U <= x && x <= log2Ceil(max).U } def contains(x: TransferSizes) = x.none || (min <= x.min && x.max <= max) def intersect(x: TransferSizes) = if (x.max < min || max < x.min) TransferSizes.none else TransferSizes(scala.math.max(min, x.min), scala.math.min(max, x.max)) // Not a union, because the result may contain sizes contained by neither term // NOT TO BE CONFUSED WITH COVERPOINTS def mincover(x: TransferSizes) = { if (none) { x } else if (x.none) { this } else { TransferSizes(scala.math.min(min, x.min), scala.math.max(max, x.max)) } } override def toString() = "TransferSizes[%d, %d]".format(min, max) } object TransferSizes { def apply(x: Int) = new TransferSizes(x) val none = new TransferSizes(0) def mincover(seq: Seq[TransferSizes]) = seq.foldLeft(none)(_ mincover _) def intersect(seq: Seq[TransferSizes]) = seq.reduce(_ intersect _) implicit def asBool(x: TransferSizes) = !x.none } // AddressSets specify the address space managed by the manager // Base is the base address, and mask is the bits consumed by the manager // e.g: base=0x200, mask=0xff describes a device managing 0x200-0x2ff // e.g: base=0x1000, mask=0xf0f describes a device managing 0x1000-0x100f, 0x1100-0x110f, ...
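// e.g: base=0x0, mask=-1 describes the entire address space (AddressSet.everything); a negative mask marks all high bits as don't-care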
case class AddressSet(base: BigInt, mask: BigInt) extends Ordered[AddressSet] { // Forbid misaligned base address (and empty sets) require ((base & mask) == 0, s"Mis-aligned AddressSets are forbidden, got: ${this.toString}") require (base >= 0, s"AddressSet negative base is ambiguous: $base") // TL2 address widths are not fixed => negative is ambiguous // We do allow negative mask (=> ignore all high bits) def contains(x: BigInt) = ((x ^ base) & ~mask) == 0 def contains(x: UInt) = ((x ^ base.U).zext & (~mask).S) === 0.S // turn x into an address contained in this set def legalize(x: UInt): UInt = base.U | (mask.U & x) // overlap iff bitwise: both care (~mask0 & ~mask1) => both equal (base0=base1) def overlaps(x: AddressSet) = (~(mask | x.mask) & (base ^ x.base)) == 0 // contains iff bitwise: x.mask => mask && contains(x.base) def contains(x: AddressSet) = ((x.mask | (base ^ x.base)) & ~mask) == 0 // The number of bytes to which the manager must be aligned def alignment = ((mask + 1) & ~mask) // Is this a contiguous memory range def contiguous = alignment == mask+1 def finite = mask >= 0 def max = { require (finite, "Max cannot be calculated on infinite mask"); base | mask } // Widen the match function to ignore all bits in imask def widen(imask: BigInt) = AddressSet(base & ~imask, mask | imask) // Return an AddressSet that only contains the addresses both sets contain def intersect(x: AddressSet): Option[AddressSet] = { if (!overlaps(x)) { None } else { val r_mask = mask & x.mask val r_base = base | x.base Some(AddressSet(r_base, r_mask)) } } def subtract(x: AddressSet): Seq[AddressSet] = { intersect(x) match { case None => Seq(this) case Some(remove) => AddressSet.enumerateBits(mask & ~remove.mask).map { bit => val nmask = (mask & (bit-1)) | remove.mask val nbase = (remove.base ^ bit) & ~nmask AddressSet(nbase, nmask) } } } // AddressSets have one natural Ordering (the containment order, if contiguous) def compare(x: AddressSet) = { val primary = (this.base - x.base).signum // smallest address first val secondary = (x.mask - this.mask).signum // largest mask first if (primary != 0) primary else secondary } // We always want to see things in hex override def toString() = { if (mask >= 0) { "AddressSet(0x%x, 0x%x)".format(base, mask) } else { "AddressSet(0x%x, ~0x%x)".format(base, ~mask) } } def toRanges = { require (finite, "Ranges cannot be calculated on infinite mask") val size = alignment val fragments = mask & ~(size-1) val bits = bitIndexes(fragments) (BigInt(0) until (BigInt(1) << bits.size)).map { i => val off = bitIndexes(i).foldLeft(base) { case (a, b) => a.setBit(bits(b)) } AddressRange(off, size) } } } object AddressSet { val everything = AddressSet(0, -1) def misaligned(base: BigInt, size: BigInt, tail: Seq[AddressSet] = Seq()): Seq[AddressSet] = { if (size == 0) tail.reverse else { val maxBaseAlignment = base & (-base) // 0 for infinite (LSB) val maxSizeAlignment = BigInt(1) << log2Floor(size) // MSB of size val step = if (maxBaseAlignment == 0 || maxBaseAlignment > maxSizeAlignment) maxSizeAlignment else maxBaseAlignment misaligned(base+step, size-step, AddressSet(base, step-1) +: tail) } } def unify(seq: Seq[AddressSet], bit: BigInt): Seq[AddressSet] = { // Pair terms up by ignoring 'bit' seq.distinct.groupBy(x => x.copy(base = x.base & ~bit)).map { case (key, seq) => if (seq.size == 1) { seq.head // singleton -> unaffected } else { key.copy(mask = key.mask | bit) // pair - widen mask by bit } }.toList } def unify(seq: Seq[AddressSet]): Seq[AddressSet] = { val bits = 
seq.map(_.base).foldLeft(BigInt(0))(_ | _) AddressSet.enumerateBits(bits).foldLeft(seq) { case (acc, bit) => unify(acc, bit) }.sorted } def enumerateMask(mask: BigInt): Seq[BigInt] = { def helper(id: BigInt, tail: Seq[BigInt]): Seq[BigInt] = if (id == mask) (id +: tail).reverse else helper(((~mask | id) + 1) & mask, id +: tail) helper(0, Nil) } def enumerateBits(mask: BigInt): Seq[BigInt] = { def helper(x: BigInt): Seq[BigInt] = { if (x == 0) { Nil } else { val bit = x & (-x) bit +: helper(x & ~bit) } } helper(mask) } } case class BufferParams(depth: Int, flow: Boolean, pipe: Boolean) { require (depth >= 0, "Buffer depth must be >= 0") def isDefined = depth > 0 def latency = if (isDefined && !flow) 1 else 0 def apply[T <: Data](x: DecoupledIO[T]) = if (isDefined) Queue(x, depth, flow=flow, pipe=pipe) else x def irrevocable[T <: Data](x: ReadyValidIO[T]) = if (isDefined) Queue.irrevocable(x, depth, flow=flow, pipe=pipe) else x def sq[T <: Data](x: DecoupledIO[T]) = if (!isDefined) x else { val sq = Module(new ShiftQueue(x.bits, depth, flow=flow, pipe=pipe)) sq.io.enq <> x sq.io.deq } override def toString() = "BufferParams:%d%s%s".format(depth, if (flow) "F" else "", if (pipe) "P" else "") } object BufferParams { implicit def apply(depth: Int): BufferParams = BufferParams(depth, false, false) val default = BufferParams(2) val none = BufferParams(0) val flow = BufferParams(1, true, false) val pipe = BufferParams(1, false, true) } case class TriStateValue(value: Boolean, set: Boolean) { def update(orig: Boolean) = if (set) value else orig } object TriStateValue { implicit def apply(value: Boolean): TriStateValue = TriStateValue(value, true) def unset = TriStateValue(false, false) } trait DirectedBuffers[T] { def copyIn(x: BufferParams): T def copyOut(x: BufferParams): T def copyInOut(x: BufferParams): T } trait IdMapEntry { def name: String def from: IdRange def to: IdRange def isCache: Boolean def requestFifo: Boolean def maxTransactionsInFlight: Option[Int] def pretty(fmt: String) = if (from ne to) { // if the subclass uses the same reference for both from and to, assume its format string has an arity of 5 fmt.format(to.start, to.end, from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } else { fmt.format(from.start, from.end, s""""$name"""", if (isCache) " [CACHE]" else "", if (requestFifo) " [FIFO]" else "") } } abstract class IdMap[T <: IdMapEntry] { protected val fmt: String val mapping: Seq[T] def pretty: String = mapping.map(_.pretty(fmt)).mkString(",\n") } File Edges.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.tilelink import chisel3._ import chisel3.util._ import chisel3.experimental.SourceInfo import org.chipsalliance.cde.config.Parameters import freechips.rocketchip.util._ class TLEdge( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdgeParameters(client, manager, params, sourceInfo) { def isAligned(address: UInt, lgSize: UInt): Bool = { if (maxLgSize == 0) true.B else { val mask = UIntToOH1(lgSize, maxLgSize) (address & mask) === 0.U } } def mask(address: UInt, lgSize: UInt): UInt = MaskGen(address, lgSize, manager.beatBytes) def staticHasData(bundle: TLChannel): Option[Boolean] = { bundle match { case _:TLBundleA => { // Do there exist A messages with Data? 
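// (A beats carry data only for PutFull/PutPartial/Arithmetic/Logical, so this reduces to whether any visible manager supports one of those.)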
val aDataYes = manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportPutFull || manager.anySupportPutPartial // Do there exist A messages without Data? val aDataNo = manager.anySupportAcquireB || manager.anySupportGet || manager.anySupportHint // Statically optimize the case where hasData is a constant if (!aDataYes) Some(false) else if (!aDataNo) Some(true) else None } case _:TLBundleB => { // Do there exist B messages with Data? val bDataYes = client.anySupportArithmetic || client.anySupportLogical || client.anySupportPutFull || client.anySupportPutPartial // Do there exist B messages without Data? val bDataNo = client.anySupportProbe || client.anySupportGet || client.anySupportHint // Statically optimize the case where hasData is a constant if (!bDataYes) Some(false) else if (!bDataNo) Some(true) else None } case _:TLBundleC => { // Do there exist C messages with Data? val cDataYes = client.anySupportGet || client.anySupportArithmetic || client.anySupportLogical || client.anySupportProbe // Do there exist C messages without Data? val cDataNo = client.anySupportPutFull || client.anySupportPutPartial || client.anySupportHint || client.anySupportProbe if (!cDataYes) Some(false) else if (!cDataNo) Some(true) else None } case _:TLBundleD => { // Do there exist D messages with Data? val dDataYes = manager.anySupportGet || manager.anySupportArithmetic || manager.anySupportLogical || manager.anySupportAcquireB // Do there exist D messages without Data? val dDataNo = manager.anySupportPutFull || manager.anySupportPutPartial || manager.anySupportHint || manager.anySupportAcquireT if (!dDataYes) Some(false) else if (!dDataNo) Some(true) else None } case _:TLBundleE => Some(false) } } def isRequest(x: TLChannel): Bool = { x match { case a: TLBundleA => true.B case b: TLBundleB => true.B case c: TLBundleC => c.opcode(2) && c.opcode(1) // opcode === TLMessages.Release || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(2) && !d.opcode(1) // opcode === TLMessages.Grant || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } } def isResponse(x: TLChannel): Bool = { x match { case a: TLBundleA => false.B case b: TLBundleB => false.B case c: TLBundleC => !c.opcode(2) || !c.opcode(1) // opcode =/= TLMessages.Release && // opcode =/= TLMessages.ReleaseData case d: TLBundleD => true.B // Grant isResponse + isRequest case e: TLBundleE => true.B } } def hasData(x: TLChannel): Bool = { val opdata = x match { case a: TLBundleA => !a.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case b: TLBundleB => !b.opcode(2) // opcode === TLMessages.PutFullData || // opcode === TLMessages.PutPartialData || // opcode === TLMessages.ArithmeticData || // opcode === TLMessages.LogicalData case c: TLBundleC => c.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.ProbeAckData || // opcode === TLMessages.ReleaseData case d: TLBundleD => d.opcode(0) // opcode === TLMessages.AccessAckData || // opcode === TLMessages.GrantData case e: TLBundleE => false.B } staticHasData(x).map(_.B).getOrElse(opdata) } def opcode(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.opcode case b: TLBundleB => b.opcode case c: TLBundleC => c.opcode case d: TLBundleD => d.opcode } } def param(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.param case b: TLBundleB => b.param case c: TLBundleC => c.param case d: TLBundleD =>
d.param } } def size(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.size case b: TLBundleB => b.size case c: TLBundleC => c.size case d: TLBundleD => d.size } } def data(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.data case b: TLBundleB => b.data case c: TLBundleC => c.data case d: TLBundleD => d.data } } def corrupt(x: TLDataChannel): Bool = { x match { case a: TLBundleA => a.corrupt case b: TLBundleB => b.corrupt case c: TLBundleC => c.corrupt case d: TLBundleD => d.corrupt } } def mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.mask case b: TLBundleB => b.mask case c: TLBundleC => mask(c.address, c.size) } } def full_mask(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => mask(a.address, a.size) case b: TLBundleB => mask(b.address, b.size) case c: TLBundleC => mask(c.address, c.size) } } def address(x: TLAddrChannel): UInt = { x match { case a: TLBundleA => a.address case b: TLBundleB => b.address case c: TLBundleC => c.address } } def source(x: TLDataChannel): UInt = { x match { case a: TLBundleA => a.source case b: TLBundleB => b.source case c: TLBundleC => c.source case d: TLBundleD => d.source } } def addr_hi(x: UInt): UInt = x >> log2Ceil(manager.beatBytes) def addr_lo(x: UInt): UInt = if (manager.beatBytes == 1) 0.U else x(log2Ceil(manager.beatBytes)-1, 0) def addr_hi(x: TLAddrChannel): UInt = addr_hi(address(x)) def addr_lo(x: TLAddrChannel): UInt = addr_lo(address(x)) def numBeats(x: TLChannel): UInt = { x match { case _: TLBundleE => 1.U case bundle: TLDataChannel => { val hasData = this.hasData(bundle) val size = this.size(bundle) val cutoff = log2Ceil(manager.beatBytes) val small = if (manager.maxTransfer <= manager.beatBytes) true.B else size <= (cutoff).U val decode = UIntToOH(size, maxLgSize+1) >> cutoff Mux(hasData, decode | small.asUInt, 1.U) } } } def numBeats1(x: TLChannel): UInt = { x match { case _: TLBundleE => 0.U case bundle: TLDataChannel => { if (maxLgSize == 0) { 0.U } else { val decode = UIntToOH1(size(bundle), maxLgSize) >> log2Ceil(manager.beatBytes) Mux(hasData(bundle), decode, 0.U) } } } } def firstlastHelper(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val beats1 = numBeats1(bits) val counter = RegInit(0.U(log2Up(maxTransfer / manager.beatBytes).W)) val counter1 = counter - 1.U val first = counter === 0.U val last = counter === 1.U || beats1 === 0.U val done = last && fire val count = (beats1 & ~counter1) when (fire) { counter := Mux(first, beats1, counter1) } (first, last, done, count) } def first(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._1 def first(x: DecoupledIO[TLChannel]): Bool = first(x.bits, x.fire) def first(x: ValidIO[TLChannel]): Bool = first(x.bits, x.valid) def last(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._2 def last(x: DecoupledIO[TLChannel]): Bool = last(x.bits, x.fire) def last(x: ValidIO[TLChannel]): Bool = last(x.bits, x.valid) def done(bits: TLChannel, fire: Bool): Bool = firstlastHelper(bits, fire)._3 def done(x: DecoupledIO[TLChannel]): Bool = done(x.bits, x.fire) def done(x: ValidIO[TLChannel]): Bool = done(x.bits, x.valid) def firstlast(bits: TLChannel, fire: Bool): (Bool, Bool, Bool) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3) } def firstlast(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.fire) def firstlast(x: ValidIO[TLChannel]): (Bool, Bool, Bool) = firstlast(x.bits, x.valid) def count(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, 
fire) (r._1, r._2, r._3, r._4) } def count(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.fire) def count(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = count(x.bits, x.valid) def addr_inc(bits: TLChannel, fire: Bool): (Bool, Bool, Bool, UInt) = { val r = firstlastHelper(bits, fire) (r._1, r._2, r._3, r._4 << log2Ceil(manager.beatBytes)) } def addr_inc(x: DecoupledIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.fire) def addr_inc(x: ValidIO[TLChannel]): (Bool, Bool, Bool, UInt) = addr_inc(x.bits, x.valid) // Does the request need T permissions to be executed? def needT(a: TLBundleA): Bool = { val acq_needT = MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLPermissions.NtoB -> false.B, TLPermissions.NtoT -> true.B, TLPermissions.BtoT -> true.B)) MuxLookup(a.opcode, WireDefault(Bool(), DontCare))(Array( TLMessages.PutFullData -> true.B, TLMessages.PutPartialData -> true.B, TLMessages.ArithmeticData -> true.B, TLMessages.LogicalData -> true.B, TLMessages.Get -> false.B, TLMessages.Hint -> MuxLookup(a.param, WireDefault(Bool(), DontCare))(Array( TLHints.PREFETCH_READ -> false.B, TLHints.PREFETCH_WRITE -> true.B)), TLMessages.AcquireBlock -> acq_needT, TLMessages.AcquirePerm -> acq_needT)) } // This is a very expensive circuit; use only if you really mean it! def inFlight(x: TLBundle): (UInt, UInt) = { val flight = RegInit(0.U(log2Ceil(3*client.endSourceId+1).W)) val bce = manager.anySupportAcquireB && client.anySupportProbe val (a_first, a_last, _) = firstlast(x.a) val (b_first, b_last, _) = firstlast(x.b) val (c_first, c_last, _) = firstlast(x.c) val (d_first, d_last, _) = firstlast(x.d) val (e_first, e_last, _) = firstlast(x.e) val (a_request, a_response) = (isRequest(x.a.bits), isResponse(x.a.bits)) val (b_request, b_response) = (isRequest(x.b.bits), isResponse(x.b.bits)) val (c_request, c_response) = (isRequest(x.c.bits), isResponse(x.c.bits)) val (d_request, d_response) = (isRequest(x.d.bits), isResponse(x.d.bits)) val (e_request, e_response) = (isRequest(x.e.bits), isResponse(x.e.bits)) val a_inc = x.a.fire && a_first && a_request val b_inc = x.b.fire && b_first && b_request val c_inc = x.c.fire && c_first && c_request val d_inc = x.d.fire && d_first && d_request val e_inc = x.e.fire && e_first && e_request val inc = Cat(Seq(a_inc, d_inc) ++ (if (bce) Seq(b_inc, c_inc, e_inc) else Nil)) val a_dec = x.a.fire && a_last && a_response val b_dec = x.b.fire && b_last && b_response val c_dec = x.c.fire && c_last && c_response val d_dec = x.d.fire && d_last && d_response val e_dec = x.e.fire && e_last && e_response val dec = Cat(Seq(a_dec, d_dec) ++ (if (bce) Seq(b_dec, c_dec, e_dec) else Nil)) val next_flight = flight + PopCount(inc) - PopCount(dec) flight := next_flight (flight, next_flight) } def prettySourceMapping(context: String): String = { s"TL-Source mapping for $context:\n${(new TLSourceIdMap(client)).pretty}\n" } } class TLEdgeOut( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { // Transfers def AcquireBlock(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquireBlock a.param := growPermissions a.size := 
lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AcquirePerm(fromSource: UInt, toAddress: UInt, lgSize: UInt, growPermissions: UInt) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.AcquirePerm a.param := growPermissions a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.Release c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleC) = { require (manager.anySupportAcquireB, s"TileLink: No managers visible from this edge support Acquires, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsAcquireBFast(toAddress, lgSize) val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ReleaseData c.param := shrinkPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt (legal, c) } def Release(fromSource: UInt, toAddress: UInt, lgSize: UInt, shrinkPermissions: UInt, data: UInt): (Bool, TLBundleC) = Release(fromSource, toAddress, lgSize, shrinkPermissions, data, false.B) def ProbeAck(b: TLBundleB, reportPermissions: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAck c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def ProbeAck(b: TLBundleB, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(b.source, b.address, b.size, reportPermissions, data) def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt, corrupt: Bool): TLBundleC = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.ProbeAckData c.param := reportPermissions c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def ProbeAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, reportPermissions: UInt, data: UInt): TLBundleC = ProbeAck(fromSource, toAddress, lgSize, reportPermissions, data, false.B) def GrantAck(d: TLBundleD): TLBundleE = GrantAck(d.sink) def GrantAck(toSink: UInt): TLBundleE = { val e = Wire(new TLBundleE(bundle)) e.sink := toSink e } // Accesses def Get(fromSource: UInt, 
toAddress: UInt, lgSize: UInt) = { require (manager.anySupportGet, s"TileLink: No managers visible from this edge support Gets, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsGetFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Get a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutFull, s"TileLink: No managers visible from this edge support Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutFullFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutFullData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleA) = Put(fromSource, toAddress, lgSize, data, mask, false.B) def Put(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleA) = { require (manager.anySupportPutPartial, s"TileLink: No managers visible from this edge support masked Puts, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsPutPartialFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.PutPartialData a.param := 0.U a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask a.data := data a.corrupt := corrupt (legal, a) } def Arithmetic(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B): (Bool, TLBundleA) = { require (manager.anySupportArithmetic, s"TileLink: No managers visible from this edge support arithmetic AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsArithmeticFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.ArithmeticData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Logical(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (manager.anySupportLogical, s"TileLink: No managers visible from this edge support logical AMOs, but one of these clients would try to request one: ${client.clients}") val legal = manager.supportsLogicalFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.LogicalData a.param := atomic a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := data a.corrupt := corrupt (legal, a) } def Hint(fromSource: UInt, toAddress: UInt, lgSize: UInt, param: UInt) = { require (manager.anySupportHint, s"TileLink: No managers visible from this edge support Hints, but one of these clients would try to request one: 
${client.clients}") val legal = manager.supportsHintFast(toAddress, lgSize) val a = Wire(new TLBundleA(bundle)) a.opcode := TLMessages.Hint a.param := param a.size := lgSize a.source := fromSource a.address := toAddress a.user := DontCare a.echo := DontCare a.mask := mask(toAddress, lgSize) a.data := DontCare a.corrupt := false.B (legal, a) } def AccessAck(b: TLBundleB): TLBundleC = AccessAck(b.source, address(b), b.size) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } def AccessAck(b: TLBundleB, data: UInt): TLBundleC = AccessAck(b.source, address(b), b.size, data) def AccessAck(b: TLBundleB, data: UInt, corrupt: Bool): TLBundleC = AccessAck(b.source, address(b), b.size, data, corrupt) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt): TLBundleC = AccessAck(fromSource, toAddress, lgSize, data, false.B) def AccessAck(fromSource: UInt, toAddress: UInt, lgSize: UInt, data: UInt, corrupt: Bool) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.AccessAckData c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := data c.corrupt := corrupt c } def HintAck(b: TLBundleB): TLBundleC = HintAck(b.source, address(b), b.size) def HintAck(fromSource: UInt, toAddress: UInt, lgSize: UInt) = { val c = Wire(new TLBundleC(bundle)) c.opcode := TLMessages.HintAck c.param := 0.U c.size := lgSize c.source := fromSource c.address := toAddress c.user := DontCare c.echo := DontCare c.data := DontCare c.corrupt := false.B c } } class TLEdgeIn( client: TLClientPortParameters, manager: TLManagerPortParameters, params: Parameters, sourceInfo: SourceInfo) extends TLEdge(client, manager, params, sourceInfo) { private def myTranspose[T](x: Seq[Seq[T]]): Seq[Seq[T]] = { val todo = x.filter(!_.isEmpty) val heads = todo.map(_.head) val tails = todo.map(_.tail) if (todo.isEmpty) Nil else { heads +: myTranspose(tails) } } // Transfers def Probe(fromAddress: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt) = { require (client.anySupportProbe, s"TileLink: No clients visible from this edge support probes, but one of these managers tried to issue one: ${manager.managers}") val legal = client.supportsProbe(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Probe b.param := capPermissions b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.Grant d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt): TLBundleD = Grant(fromSink, toSource, lgSize, capPermissions, data, false.B, false.B) def Grant(fromSink: UInt, toSource: UInt, lgSize: UInt, capPermissions: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new 
TLBundleD(bundle)) d.opcode := TLMessages.GrantData d.param := capPermissions d.size := lgSize d.source := toSource d.sink := fromSink d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def ReleaseAck(c: TLBundleC): TLBundleD = ReleaseAck(c.source, c.size, false.B) def ReleaseAck(toSource: UInt, lgSize: UInt, denied: Bool): TLBundleD = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.ReleaseAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } // Accesses def Get(fromAddress: UInt, toSource: UInt, lgSize: UInt) = { require (client.anySupportGet, s"TileLink: No clients visible from this edge support Gets, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsGet(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Get b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutFull, s"TileLink: No clients visible from this edge support Puts, but one of these managers would try to issue one: ${manager.managers}") val legal = client.supportsPutFull(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutFullData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt): (Bool, TLBundleB) = Put(fromAddress, toSource, lgSize, data, mask, false.B) def Put(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, mask: UInt, corrupt: Bool): (Bool, TLBundleB) = { require (client.anySupportPutPartial, s"TileLink: No clients visible from this edge support masked Puts, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsPutPartial(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.PutPartialData b.param := 0.U b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask b.data := data b.corrupt := corrupt (legal, b) } def Arithmetic(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportArithmetic, s"TileLink: No clients visible from this edge support arithmetic AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsArithmetic(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.ArithmeticData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Logical(fromAddress: UInt, toSource: UInt, lgSize: UInt, data: UInt, atomic: UInt, corrupt: Bool = false.B) = { require (client.anySupportLogical, s"TileLink: No clients visible from this edge support logical AMOs, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsLogical(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) 
b.opcode := TLMessages.LogicalData b.param := atomic b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := data b.corrupt := corrupt (legal, b) } def Hint(fromAddress: UInt, toSource: UInt, lgSize: UInt, param: UInt) = { require (client.anySupportHint, s"TileLink: No clients visible from this edge support Hints, but one of these managers would try to request one: ${manager.managers}") val legal = client.supportsHint(toSource, lgSize) val b = Wire(new TLBundleB(bundle)) b.opcode := TLMessages.Hint b.param := param b.size := lgSize b.source := toSource b.address := fromAddress b.mask := mask(fromAddress, lgSize) b.data := DontCare b.corrupt := false.B (legal, b) } def AccessAck(a: TLBundleA): TLBundleD = AccessAck(a.source, a.size) def AccessAck(a: TLBundleA, denied: Bool): TLBundleD = AccessAck(a.source, a.size, denied) def AccessAck(toSource: UInt, lgSize: UInt): TLBundleD = AccessAck(toSource, lgSize, false.B) def AccessAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } def AccessAck(a: TLBundleA, data: UInt): TLBundleD = AccessAck(a.source, a.size, data) def AccessAck(a: TLBundleA, data: UInt, denied: Bool, corrupt: Bool): TLBundleD = AccessAck(a.source, a.size, data, denied, corrupt) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt): TLBundleD = AccessAck(toSource, lgSize, data, false.B, false.B) def AccessAck(toSource: UInt, lgSize: UInt, data: UInt, denied: Bool, corrupt: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.AccessAckData d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := data d.corrupt := corrupt d } def HintAck(a: TLBundleA): TLBundleD = HintAck(a, false.B) def HintAck(a: TLBundleA, denied: Bool): TLBundleD = HintAck(a.source, a.size, denied) def HintAck(toSource: UInt, lgSize: UInt): TLBundleD = HintAck(toSource, lgSize, false.B) def HintAck(toSource: UInt, lgSize: UInt, denied: Bool) = { val d = Wire(new TLBundleD(bundle)) d.opcode := TLMessages.HintAck d.param := 0.U d.size := lgSize d.source := toSource d.sink := 0.U d.denied := denied d.user := DontCare d.echo := DontCare d.data := DontCare d.corrupt := false.B d } }
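
The TLEdgeOut and TLEdgeIn classes above are message constructors: each helper returns a fully populated channel bundle and, for requests, a legality check derived from the visible managers or clients. The following is a minimal, hypothetical usage sketch and is not part of the Chisel files above or of the generated module below; the class name ExampleTLClient, the node parameters, the address 0x100, and the 8-byte transfer size are assumptions chosen only for illustration.

// Illustrative only: a hypothetical client LazyModule that uses the
// TLEdgeOut.Get helper to issue a single 8-byte read on channel A.
import chisel3._
import org.chipsalliance.cde.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tilelink._

class ExampleTLClient(implicit p: Parameters) extends LazyModule {
  // One source ID, no Acquire/Probe traffic (so channels B, C, and E stay idle).
  val node = TLClientNode(Seq(TLMasterPortParameters.v1(
    Seq(TLMasterParameters.v1(name = "example", sourceId = IdRange(0, 1))))))

  lazy val module = new LazyModuleImp(this) {
    val (tl, edge) = node.out(0)

    // edge.Get returns (legal, bits); `legal` is the fast check that the
    // manager owning this address supports Gets of this size.
    val (getLegal, getBits) = edge.Get(fromSource = 0.U, toAddress = 0x100.U, lgSize = 3.U)

    val sent = RegInit(false.B) // issue the request exactly once
    tl.a.valid := getLegal && !sent
    tl.a.bits  := getBits
    when (tl.a.fire) { sent := true.B }

    tl.d.ready := true.B // always accept the AccessAckData response
  }
}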
module TLMonitor_18( // @[Monitor.scala:36:7] input clock, // @[Monitor.scala:36:7] input reset, // @[Monitor.scala:36:7] input io_in_a_ready, // @[Monitor.scala:20:14] input io_in_a_valid, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_opcode, // @[Monitor.scala:20:14] input [2:0] io_in_a_bits_param, // @[Monitor.scala:20:14] input [1:0] io_in_a_bits_size, // @[Monitor.scala:20:14] input [10:0] io_in_a_bits_source, // @[Monitor.scala:20:14] input [12:0] io_in_a_bits_address, // @[Monitor.scala:20:14] input [7:0] io_in_a_bits_mask, // @[Monitor.scala:20:14] input [63:0] io_in_a_bits_data, // @[Monitor.scala:20:14] input io_in_a_bits_corrupt, // @[Monitor.scala:20:14] input io_in_d_ready, // @[Monitor.scala:20:14] input io_in_d_valid, // @[Monitor.scala:20:14] input [2:0] io_in_d_bits_opcode, // @[Monitor.scala:20:14] input [1:0] io_in_d_bits_size, // @[Monitor.scala:20:14] input [10:0] io_in_d_bits_source, // @[Monitor.scala:20:14] input [63:0] io_in_d_bits_data // @[Monitor.scala:20:14] ); wire [31:0] _plusarg_reader_1_out; // @[PlusArg.scala:80:11] wire [31:0] _plusarg_reader_out; // @[PlusArg.scala:80:11] wire io_in_a_ready_0 = io_in_a_ready; // @[Monitor.scala:36:7] wire io_in_a_valid_0 = io_in_a_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_opcode_0 = io_in_a_bits_opcode; // @[Monitor.scala:36:7] wire [2:0] io_in_a_bits_param_0 = io_in_a_bits_param; // @[Monitor.scala:36:7] wire [1:0] io_in_a_bits_size_0 = io_in_a_bits_size; // @[Monitor.scala:36:7] wire [10:0] io_in_a_bits_source_0 = io_in_a_bits_source; // @[Monitor.scala:36:7] wire [12:0] io_in_a_bits_address_0 = io_in_a_bits_address; // @[Monitor.scala:36:7] wire [7:0] io_in_a_bits_mask_0 = io_in_a_bits_mask; // @[Monitor.scala:36:7] wire [63:0] io_in_a_bits_data_0 = io_in_a_bits_data; // @[Monitor.scala:36:7] wire io_in_a_bits_corrupt_0 = io_in_a_bits_corrupt; // @[Monitor.scala:36:7] wire io_in_d_ready_0 = io_in_d_ready; // @[Monitor.scala:36:7] wire io_in_d_valid_0 = io_in_d_valid; // @[Monitor.scala:36:7] wire [2:0] io_in_d_bits_opcode_0 = io_in_d_bits_opcode; // @[Monitor.scala:36:7] wire [1:0] io_in_d_bits_size_0 = io_in_d_bits_size; // @[Monitor.scala:36:7] wire [10:0] io_in_d_bits_source_0 = io_in_d_bits_source; // @[Monitor.scala:36:7] wire [63:0] io_in_d_bits_data_0 = io_in_d_bits_data; // @[Monitor.scala:36:7] wire io_in_d_bits_sink = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_denied = 1'h0; // @[Monitor.scala:36:7] wire io_in_d_bits_corrupt = 1'h0; // @[Monitor.scala:36:7] wire _source_ok_T = 1'h0; // @[Parameters.scala:54:10] wire _source_ok_T_6 = 1'h0; // @[Parameters.scala:54:10] wire sink_ok = 1'h0; // @[Monitor.scala:309:31] wire a_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count = 1'h0; // @[Edges.scala:234:25] wire a_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire a_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire a_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire d_first_beats1_decode_1 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_1 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_1 = 1'h0; // @[Edges.scala:234:25] wire _c_first_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_1_ready 
= 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_first_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_first_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_first_T = 1'h0; // @[Decoupled.scala:51:35] wire c_first_beats1_decode = 1'h0; // @[Edges.scala:220:59] wire c_first_beats1_opdata = 1'h0; // @[Edges.scala:102:36] wire c_first_beats1 = 1'h0; // @[Edges.scala:221:14] wire _c_first_last_T = 1'h0; // @[Edges.scala:232:25] wire c_first_done = 1'h0; // @[Edges.scala:233:22] wire _c_first_count_T = 1'h0; // @[Edges.scala:234:27] wire c_first_count = 1'h0; // @[Edges.scala:234:25] wire _c_first_counter_T = 1'h0; // @[Edges.scala:236:21] wire d_first_beats1_decode_2 = 1'h0; // @[Edges.scala:220:59] wire d_first_beats1_2 = 1'h0; // @[Edges.scala:221:14] wire d_first_count_2 = 1'h0; // @[Edges.scala:234:25] wire _c_set_wo_ready_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_wo_ready_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_wo_ready_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_interm_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_interm_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_opcodes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_opcodes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_sizes_set_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire 
_c_sizes_set_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_sizes_set_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T = 1'h0; // @[Monitor.scala:772:47] wire _c_probe_ack_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _c_probe_ack_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _c_probe_ack_T_1 = 1'h0; // @[Monitor.scala:772:95] wire c_probe_ack = 1'h0; // @[Monitor.scala:772:71] wire _same_cycle_resp_WIRE_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_1_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_1_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_3 = 1'h0; // @[Monitor.scala:795:44] wire _same_cycle_resp_WIRE_2_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_2_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_3_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_3_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_T_4 = 1'h0; // @[Edges.scala:68:36] wire _same_cycle_resp_T_5 = 1'h0; // @[Edges.scala:68:51] wire _same_cycle_resp_T_6 = 1'h0; // @[Edges.scala:68:40] wire _same_cycle_resp_T_7 = 1'h0; // @[Monitor.scala:795:55] wire _same_cycle_resp_WIRE_4_ready = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_valid = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_4_bits_corrupt = 1'h0; // @[Bundles.scala:265:74] wire _same_cycle_resp_WIRE_5_ready = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_valid = 1'h0; // @[Bundles.scala:265:61] wire _same_cycle_resp_WIRE_5_bits_corrupt = 1'h0; // @[Bundles.scala:265:61] wire same_cycle_resp_1 = 1'h0; // @[Monitor.scala:795:88] wire _source_ok_T_1 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_2 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_3 = 1'h1; // @[Parameters.scala:54:67] wire _source_ok_T_7 = 1'h1; // @[Parameters.scala:54:32] wire _source_ok_T_8 = 1'h1; // @[Parameters.scala:56:32] wire _source_ok_T_9 = 1'h1; // @[Parameters.scala:54:67] wire _a_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire a_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire d_first_last = 1'h1; // @[Edges.scala:232:33] wire _a_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire a_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_3 = 1'h1; // @[Edges.scala:232:43] wire 
d_first_last_1 = 1'h1; // @[Edges.scala:232:33] wire c_first_counter1 = 1'h1; // @[Edges.scala:230:28] wire c_first = 1'h1; // @[Edges.scala:231:25] wire _c_first_last_T_1 = 1'h1; // @[Edges.scala:232:43] wire c_first_last = 1'h1; // @[Edges.scala:232:33] wire _d_first_last_T_5 = 1'h1; // @[Edges.scala:232:43] wire d_first_last_2 = 1'h1; // @[Edges.scala:232:33] wire [1:0] _c_first_counter1_T = 2'h3; // @[Edges.scala:230:28] wire [1:0] io_in_d_bits_param = 2'h0; // @[Monitor.scala:36:7] wire [1:0] _c_first_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_first_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_first_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_wo_ready_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_wo_ready_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_interm_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_interm_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_opcodes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_opcodes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_sizes_set_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_sizes_set_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _c_probe_ack_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _c_probe_ack_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_1_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_2_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_3_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [1:0] _same_cycle_resp_WIRE_4_bits_size = 2'h0; // @[Bundles.scala:265:74] wire [1:0] _same_cycle_resp_WIRE_5_bits_size = 2'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_first_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_first_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_wo_ready_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_wo_ready_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_interm_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_interm_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_opcodes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_opcodes_set_WIRE_1_bits_data = 64'h0; 
// @[Bundles.scala:265:61] wire [63:0] _c_sizes_set_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_sizes_set_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _c_probe_ack_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _c_probe_ack_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_1_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_2_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_3_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [63:0] _same_cycle_resp_WIRE_4_bits_data = 64'h0; // @[Bundles.scala:265:74] wire [63:0] _same_cycle_resp_WIRE_5_bits_data = 64'h0; // @[Bundles.scala:265:61] wire [12:0] _c_first_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_first_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_first_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_first_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_set_wo_ready_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_set_wo_ready_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_opcodes_set_interm_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_opcodes_set_interm_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_sizes_set_interm_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_sizes_set_interm_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_opcodes_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_opcodes_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_sizes_set_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_sizes_set_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_probe_ack_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_probe_ack_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _c_probe_ack_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _c_probe_ack_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_1_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_2_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_3_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [12:0] _same_cycle_resp_WIRE_4_bits_address = 13'h0; // @[Bundles.scala:265:74] wire [12:0] _same_cycle_resp_WIRE_5_bits_address = 13'h0; // @[Bundles.scala:265:61] wire [10:0] _c_first_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_first_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_first_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_first_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_set_wo_ready_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_set_wo_ready_WIRE_1_bits_source = 11'h0; // 
@[Bundles.scala:265:61] wire [10:0] _c_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_opcodes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_opcodes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_sizes_set_interm_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_sizes_set_interm_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_opcodes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_opcodes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_sizes_set_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_sizes_set_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_probe_ack_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_probe_ack_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _c_probe_ack_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _c_probe_ack_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _same_cycle_resp_WIRE_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _same_cycle_resp_WIRE_1_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _same_cycle_resp_WIRE_2_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _same_cycle_resp_WIRE_3_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [10:0] _same_cycle_resp_WIRE_4_bits_source = 11'h0; // @[Bundles.scala:265:74] wire [10:0] _same_cycle_resp_WIRE_5_bits_source = 11'h0; // @[Bundles.scala:265:61] wire [2:0] responseMap_0 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMap_1 = 3'h0; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_0 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_1 = 3'h0; // @[Monitor.scala:644:42] wire [2:0] _c_first_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_first_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_first_beats1_decode_T_2 = 3'h0; // @[package.scala:243:46] wire [2:0] c_sizes_set_interm = 3'h0; // @[Monitor.scala:755:40] wire [2:0] _c_set_wo_ready_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_wo_ready_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_wo_ready_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_interm_WIRE_1_bits_param = 
3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_interm_T = 3'h0; // @[Monitor.scala:766:51] wire [2:0] _c_opcodes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_opcodes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_opcodes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_sizes_set_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_sizes_set_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _c_probe_ack_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _c_probe_ack_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_1_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_1_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_2_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_2_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_3_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_3_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_4_bits_opcode = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_4_bits_param = 3'h0; // @[Bundles.scala:265:74] wire [2:0] _same_cycle_resp_WIRE_5_bits_opcode = 3'h0; // @[Bundles.scala:265:61] wire [2:0] _same_cycle_resp_WIRE_5_bits_param = 3'h0; // @[Bundles.scala:265:61] wire [15:0] _a_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _a_size_lookup_T_5 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_opcodes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _d_sizes_clr_T_3 = 16'hF; // @[Monitor.scala:612:57] wire [15:0] _c_opcode_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _c_size_lookup_T_5 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_opcodes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [15:0] _d_sizes_clr_T_9 = 16'hF; // @[Monitor.scala:724:57] wire [16:0] _a_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _a_size_lookup_T_4 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_opcodes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _d_sizes_clr_T_2 = 17'hF; // @[Monitor.scala:612:57] wire [16:0] _c_opcode_lookup_T_4 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _c_size_lookup_T_4 = 
17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_opcodes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [16:0] _d_sizes_clr_T_8 = 17'hF; // @[Monitor.scala:724:57] wire [15:0] _a_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _a_size_lookup_T_3 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_opcodes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _d_sizes_clr_T_1 = 16'h10; // @[Monitor.scala:612:51] wire [15:0] _c_opcode_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _c_size_lookup_T_3 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_opcodes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [15:0] _d_sizes_clr_T_7 = 16'h10; // @[Monitor.scala:724:51] wire [16385:0] _c_sizes_set_T_1 = 16386'h0; // @[Monitor.scala:768:52] wire [13:0] _c_opcodes_set_T = 14'h0; // @[Monitor.scala:767:79] wire [13:0] _c_sizes_set_T = 14'h0; // @[Monitor.scala:768:77] wire [16386:0] _c_opcodes_set_T_1 = 16387'h0; // @[Monitor.scala:767:54] wire [2:0] responseMap_2 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_3 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMap_4 = 3'h1; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_2 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_3 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_4 = 3'h1; // @[Monitor.scala:644:42] wire [2:0] _c_sizes_set_interm_T_1 = 3'h1; // @[Monitor.scala:766:59] wire [3:0] _c_opcodes_set_interm_T_1 = 4'h1; // @[Monitor.scala:765:61] wire [3:0] c_opcodes_set_interm = 4'h0; // @[Monitor.scala:754:40] wire [3:0] _c_opcodes_set_interm_T = 4'h0; // @[Monitor.scala:765:53] wire [2047:0] _c_set_wo_ready_T = 2048'h1; // @[OneHot.scala:58:35] wire [2047:0] _c_set_T = 2048'h1; // @[OneHot.scala:58:35] wire [4159:0] c_opcodes_set = 4160'h0; // @[Monitor.scala:740:34] wire [4159:0] c_sizes_set = 4160'h0; // @[Monitor.scala:741:34] wire [1039:0] c_set = 1040'h0; // @[Monitor.scala:738:34] wire [1039:0] c_set_wo_ready = 1040'h0; // @[Monitor.scala:739:34] wire [2:0] _c_first_beats1_decode_T_1 = 3'h7; // @[package.scala:243:76] wire [5:0] _c_first_beats1_decode_T = 6'h7; // @[package.scala:243:71] wire [2:0] responseMap_6 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMap_7 = 3'h4; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_7 = 3'h4; // @[Monitor.scala:644:42] wire [2:0] responseMapSecondOption_6 = 3'h5; // @[Monitor.scala:644:42] wire [2:0] responseMap_5 = 3'h2; // @[Monitor.scala:643:42] wire [2:0] responseMapSecondOption_5 = 3'h2; // @[Monitor.scala:644:42] wire [3:0] _a_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:637:123] wire [3:0] _a_size_lookup_T_2 = 4'h4; // @[Monitor.scala:641:117] wire [3:0] _d_opcodes_clr_T = 4'h4; // @[Monitor.scala:680:48] wire [3:0] _d_sizes_clr_T = 4'h4; // @[Monitor.scala:681:48] wire [3:0] _c_opcode_lookup_T_2 = 4'h4; // @[Monitor.scala:749:123] wire [3:0] _c_size_lookup_T_2 = 4'h4; // @[Monitor.scala:750:119] wire [3:0] _d_opcodes_clr_T_6 = 4'h4; // @[Monitor.scala:790:48] wire [3:0] _d_sizes_clr_T_6 = 4'h4; // @[Monitor.scala:791:48] wire [10:0] _source_ok_uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_1 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_2 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_3 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_4 = 
io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_5 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_6 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_7 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _uncommonBits_T_8 = io_in_a_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] _source_ok_uncommonBits_T_1 = io_in_d_bits_source_0; // @[Monitor.scala:36:7] wire [10:0] source_ok_uncommonBits = _source_ok_uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_4 = source_ok_uncommonBits < 11'h410; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_5 = _source_ok_T_4; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_0 = _source_ok_T_5; // @[Parameters.scala:1138:31] wire [5:0] _GEN = 6'h7 << io_in_a_bits_size_0; // @[package.scala:243:71] wire [5:0] _is_aligned_mask_T; // @[package.scala:243:71] assign _is_aligned_mask_T = _GEN; // @[package.scala:243:71] wire [5:0] _a_first_beats1_decode_T; // @[package.scala:243:71] assign _a_first_beats1_decode_T = _GEN; // @[package.scala:243:71] wire [5:0] _a_first_beats1_decode_T_3; // @[package.scala:243:71] assign _a_first_beats1_decode_T_3 = _GEN; // @[package.scala:243:71] wire [2:0] _is_aligned_mask_T_1 = _is_aligned_mask_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] is_aligned_mask = ~_is_aligned_mask_T_1; // @[package.scala:243:{46,76}] wire [12:0] _is_aligned_T = {10'h0, io_in_a_bits_address_0[2:0] & is_aligned_mask}; // @[package.scala:243:46] wire is_aligned = _is_aligned_T == 13'h0; // @[Edges.scala:21:{16,24}] wire [2:0] _mask_sizeOH_T = {1'h0, io_in_a_bits_size_0}; // @[Misc.scala:202:34] wire [1:0] mask_sizeOH_shiftAmount = _mask_sizeOH_T[1:0]; // @[OneHot.scala:64:49] wire [3:0] _mask_sizeOH_T_1 = 4'h1 << mask_sizeOH_shiftAmount; // @[OneHot.scala:64:49, :65:12] wire [2:0] _mask_sizeOH_T_2 = _mask_sizeOH_T_1[2:0]; // @[OneHot.scala:65:{12,27}] wire [2:0] mask_sizeOH = {_mask_sizeOH_T_2[2:1], 1'h1}; // @[OneHot.scala:65:27] wire mask_sub_sub_sub_0_1 = &io_in_a_bits_size_0; // @[Misc.scala:206:21] wire mask_sub_sub_size = mask_sizeOH[2]; // @[Misc.scala:202:81, :209:26] wire mask_sub_sub_bit = io_in_a_bits_address_0[2]; // @[Misc.scala:210:26] wire mask_sub_sub_1_2 = mask_sub_sub_bit; // @[Misc.scala:210:26, :214:27] wire mask_sub_sub_nbit = ~mask_sub_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_sub_0_2 = mask_sub_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_sub_acc_T = mask_sub_sub_size & mask_sub_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_0_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T; // @[Misc.scala:206:21, :215:{29,38}] wire _mask_sub_sub_acc_T_1 = mask_sub_sub_size & mask_sub_sub_1_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_sub_1_1 = mask_sub_sub_sub_0_1 | _mask_sub_sub_acc_T_1; // @[Misc.scala:206:21, :215:{29,38}] wire mask_sub_size = mask_sizeOH[1]; // @[Misc.scala:202:81, :209:26] wire mask_sub_bit = io_in_a_bits_address_0[1]; // @[Misc.scala:210:26] wire mask_sub_nbit = ~mask_sub_bit; // @[Misc.scala:210:26, :211:20] wire mask_sub_0_2 = mask_sub_sub_0_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T = mask_sub_size & mask_sub_0_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_0_1 = mask_sub_sub_0_1 | _mask_sub_acc_T; // @[Misc.scala:215:{29,38}] wire mask_sub_1_2 = mask_sub_sub_0_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_1 = mask_sub_size & mask_sub_1_2; // 
@[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_1_1 = mask_sub_sub_0_1 | _mask_sub_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_sub_2_2 = mask_sub_sub_1_2 & mask_sub_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_sub_acc_T_2 = mask_sub_size & mask_sub_2_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_2_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_sub_3_2 = mask_sub_sub_1_2 & mask_sub_bit; // @[Misc.scala:210:26, :214:27] wire _mask_sub_acc_T_3 = mask_sub_size & mask_sub_3_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_sub_3_1 = mask_sub_sub_1_1 | _mask_sub_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_size = mask_sizeOH[0]; // @[Misc.scala:202:81, :209:26] wire mask_bit = io_in_a_bits_address_0[0]; // @[Misc.scala:210:26] wire mask_nbit = ~mask_bit; // @[Misc.scala:210:26, :211:20] wire mask_eq = mask_sub_0_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T = mask_size & mask_eq; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc = mask_sub_0_1 | _mask_acc_T; // @[Misc.scala:215:{29,38}] wire mask_eq_1 = mask_sub_0_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_1 = mask_size & mask_eq_1; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_1 = mask_sub_0_1 | _mask_acc_T_1; // @[Misc.scala:215:{29,38}] wire mask_eq_2 = mask_sub_1_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_2 = mask_size & mask_eq_2; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_2 = mask_sub_1_1 | _mask_acc_T_2; // @[Misc.scala:215:{29,38}] wire mask_eq_3 = mask_sub_1_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_3 = mask_size & mask_eq_3; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_3 = mask_sub_1_1 | _mask_acc_T_3; // @[Misc.scala:215:{29,38}] wire mask_eq_4 = mask_sub_2_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_4 = mask_size & mask_eq_4; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_4 = mask_sub_2_1 | _mask_acc_T_4; // @[Misc.scala:215:{29,38}] wire mask_eq_5 = mask_sub_2_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_5 = mask_size & mask_eq_5; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_5 = mask_sub_2_1 | _mask_acc_T_5; // @[Misc.scala:215:{29,38}] wire mask_eq_6 = mask_sub_3_2 & mask_nbit; // @[Misc.scala:211:20, :214:27] wire _mask_acc_T_6 = mask_size & mask_eq_6; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_6 = mask_sub_3_1 | _mask_acc_T_6; // @[Misc.scala:215:{29,38}] wire mask_eq_7 = mask_sub_3_2 & mask_bit; // @[Misc.scala:210:26, :214:27] wire _mask_acc_T_7 = mask_size & mask_eq_7; // @[Misc.scala:209:26, :214:27, :215:38] wire mask_acc_7 = mask_sub_3_1 | _mask_acc_T_7; // @[Misc.scala:215:{29,38}] wire [1:0] mask_lo_lo = {mask_acc_1, mask_acc}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_lo_hi = {mask_acc_3, mask_acc_2}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_lo = {mask_lo_hi, mask_lo_lo}; // @[Misc.scala:222:10] wire [1:0] mask_hi_lo = {mask_acc_5, mask_acc_4}; // @[Misc.scala:215:29, :222:10] wire [1:0] mask_hi_hi = {mask_acc_7, mask_acc_6}; // @[Misc.scala:215:29, :222:10] wire [3:0] mask_hi = {mask_hi_hi, mask_hi_lo}; // @[Misc.scala:222:10] wire [7:0] mask = {mask_hi, mask_lo}; // @[Misc.scala:222:10] wire [10:0] uncommonBits = _uncommonBits_T; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_1 = _uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_2 = _uncommonBits_T_2; // @[Parameters.scala:52:{29,56}] wire [10:0] 
uncommonBits_3 = _uncommonBits_T_3; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_4 = _uncommonBits_T_4; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_5 = _uncommonBits_T_5; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_6 = _uncommonBits_T_6; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_7 = _uncommonBits_T_7; // @[Parameters.scala:52:{29,56}] wire [10:0] uncommonBits_8 = _uncommonBits_T_8; // @[Parameters.scala:52:{29,56}] wire [10:0] source_ok_uncommonBits_1 = _source_ok_uncommonBits_T_1; // @[Parameters.scala:52:{29,56}] wire _source_ok_T_10 = source_ok_uncommonBits_1 < 11'h410; // @[Parameters.scala:52:56, :57:20] wire _source_ok_T_11 = _source_ok_T_10; // @[Parameters.scala:56:48, :57:20] wire _source_ok_WIRE_1_0 = _source_ok_T_11; // @[Parameters.scala:1138:31] wire _T_665 = io_in_a_ready_0 & io_in_a_valid_0; // @[Decoupled.scala:51:35] wire _a_first_T; // @[Decoupled.scala:51:35] assign _a_first_T = _T_665; // @[Decoupled.scala:51:35] wire _a_first_T_1; // @[Decoupled.scala:51:35] assign _a_first_T_1 = _T_665; // @[Decoupled.scala:51:35] wire a_first_done = _a_first_T; // @[Decoupled.scala:51:35] wire [2:0] _a_first_beats1_decode_T_1 = _a_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _a_first_beats1_decode_T_2 = ~_a_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire _a_first_beats1_opdata_T = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire _a_first_beats1_opdata_T_1 = io_in_a_bits_opcode_0[2]; // @[Monitor.scala:36:7] wire a_first_beats1_opdata = ~_a_first_beats1_opdata_T; // @[Edges.scala:92:{28,37}] reg a_first_counter; // @[Edges.scala:229:27] wire _a_first_last_T = a_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T = {1'h0, a_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1 = _a_first_counter1_T[0]; // @[Edges.scala:230:28] wire a_first = ~a_first_counter; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T = ~a_first_counter1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T = ~a_first & a_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode; // @[Monitor.scala:387:22] reg [2:0] param; // @[Monitor.scala:388:22] reg [1:0] size; // @[Monitor.scala:389:22] reg [10:0] source; // @[Monitor.scala:390:22] reg [12:0] address; // @[Monitor.scala:391:22] wire _T_733 = io_in_d_ready_0 & io_in_d_valid_0; // @[Decoupled.scala:51:35] wire _d_first_T; // @[Decoupled.scala:51:35] assign _d_first_T = _T_733; // @[Decoupled.scala:51:35] wire _d_first_T_1; // @[Decoupled.scala:51:35] assign _d_first_T_1 = _T_733; // @[Decoupled.scala:51:35] wire _d_first_T_2; // @[Decoupled.scala:51:35] assign _d_first_T_2 = _T_733; // @[Decoupled.scala:51:35] wire d_first_done = _d_first_T; // @[Decoupled.scala:51:35] wire [5:0] _GEN_0 = 6'h7 << io_in_d_bits_size_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T; // @[package.scala:243:71] assign _d_first_beats1_decode_T = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_3; // @[package.scala:243:71] assign _d_first_beats1_decode_T_3 = _GEN_0; // @[package.scala:243:71] wire [5:0] _d_first_beats1_decode_T_6; // @[package.scala:243:71] assign _d_first_beats1_decode_T_6 = _GEN_0; // @[package.scala:243:71] wire [2:0] _d_first_beats1_decode_T_1 = _d_first_beats1_decode_T[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_2 = ~_d_first_beats1_decode_T_1; // @[package.scala:243:{46,76}] wire d_first_beats1_opdata = 
io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_1 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] wire d_first_beats1_opdata_2 = io_in_d_bits_opcode_0[0]; // @[Monitor.scala:36:7] reg d_first_counter; // @[Edges.scala:229:27] wire _d_first_last_T = d_first_counter; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T = {1'h0, d_first_counter} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1 = _d_first_counter1_T[0]; // @[Edges.scala:230:28] wire d_first = ~d_first_counter; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T = ~d_first_counter1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T = ~d_first & d_first_counter1; // @[Edges.scala:230:28, :231:25, :236:21] reg [2:0] opcode_1; // @[Monitor.scala:538:22] reg [1:0] size_1; // @[Monitor.scala:540:22] reg [10:0] source_1; // @[Monitor.scala:541:22] reg [1039:0] inflight; // @[Monitor.scala:614:27] reg [4159:0] inflight_opcodes; // @[Monitor.scala:616:35] reg [4159:0] inflight_sizes; // @[Monitor.scala:618:33] wire a_first_done_1 = _a_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _a_first_beats1_decode_T_4 = _a_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _a_first_beats1_decode_T_5 = ~_a_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] wire a_first_beats1_opdata_1 = ~_a_first_beats1_opdata_T_1; // @[Edges.scala:92:{28,37}] reg a_first_counter_1; // @[Edges.scala:229:27] wire _a_first_last_T_2 = a_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _a_first_counter1_T_1 = {1'h0, a_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire a_first_counter1_1 = _a_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire a_first_1 = ~a_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _a_first_count_T_1 = ~a_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _a_first_counter_T_1 = ~a_first_1 & a_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire d_first_done_1 = _d_first_T_1; // @[Decoupled.scala:51:35] wire [2:0] _d_first_beats1_decode_T_4 = _d_first_beats1_decode_T_3[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_5 = ~_d_first_beats1_decode_T_4; // @[package.scala:243:{46,76}] reg d_first_counter_1; // @[Edges.scala:229:27] wire _d_first_last_T_2 = d_first_counter_1; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_1 = {1'h0, d_first_counter_1} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_1 = _d_first_counter1_T_1[0]; // @[Edges.scala:230:28] wire d_first_1 = ~d_first_counter_1; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_1 = ~d_first_counter1_1; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_1 = ~d_first_1 & d_first_counter1_1; // @[Edges.scala:230:28, :231:25, :236:21] wire [1039:0] a_set; // @[Monitor.scala:626:34] wire [1039:0] a_set_wo_ready; // @[Monitor.scala:627:34] wire [4159:0] a_opcodes_set; // @[Monitor.scala:630:33] wire [4159:0] a_sizes_set; // @[Monitor.scala:632:31] wire [2:0] a_opcode_lookup; // @[Monitor.scala:635:35] wire [13:0] _GEN_1 = {1'h0, io_in_d_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :637:69] wire [13:0] _a_opcode_lookup_T; // @[Monitor.scala:637:69] assign _a_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69] wire [13:0] _a_size_lookup_T; // @[Monitor.scala:641:65] assign _a_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :641:65] wire [13:0] _d_opcodes_clr_T_4; // @[Monitor.scala:680:101] assign _d_opcodes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :680:101] wire 
[13:0] _d_sizes_clr_T_4; // @[Monitor.scala:681:99] assign _d_sizes_clr_T_4 = _GEN_1; // @[Monitor.scala:637:69, :681:99] wire [13:0] _c_opcode_lookup_T; // @[Monitor.scala:749:69] assign _c_opcode_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :749:69] wire [13:0] _c_size_lookup_T; // @[Monitor.scala:750:67] assign _c_size_lookup_T = _GEN_1; // @[Monitor.scala:637:69, :750:67] wire [13:0] _d_opcodes_clr_T_10; // @[Monitor.scala:790:101] assign _d_opcodes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :790:101] wire [13:0] _d_sizes_clr_T_10; // @[Monitor.scala:791:99] assign _d_sizes_clr_T_10 = _GEN_1; // @[Monitor.scala:637:69, :791:99] wire [4159:0] _a_opcode_lookup_T_1 = inflight_opcodes >> _a_opcode_lookup_T; // @[Monitor.scala:616:35, :637:{44,69}] wire [4159:0] _a_opcode_lookup_T_6 = {4156'h0, _a_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:637:{44,97}] wire [4159:0] _a_opcode_lookup_T_7 = {1'h0, _a_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:637:{97,152}] assign a_opcode_lookup = _a_opcode_lookup_T_7[2:0]; // @[Monitor.scala:635:35, :637:{21,152}] wire [3:0] a_size_lookup; // @[Monitor.scala:639:33] wire [4159:0] _a_size_lookup_T_1 = inflight_sizes >> _a_size_lookup_T; // @[Monitor.scala:618:33, :641:{40,65}] wire [4159:0] _a_size_lookup_T_6 = {4156'h0, _a_size_lookup_T_1[3:0]}; // @[Monitor.scala:641:{40,91}] wire [4159:0] _a_size_lookup_T_7 = {1'h0, _a_size_lookup_T_6[4159:1]}; // @[Monitor.scala:641:{91,144}] assign a_size_lookup = _a_size_lookup_T_7[3:0]; // @[Monitor.scala:639:33, :641:{19,144}] wire [3:0] a_opcodes_set_interm; // @[Monitor.scala:646:40] wire [2:0] a_sizes_set_interm; // @[Monitor.scala:648:38] wire _same_cycle_resp_T = io_in_a_valid_0 & a_first_1; // @[Monitor.scala:36:7, :651:26, :684:44] wire [2047:0] _GEN_2 = 2048'h1 << io_in_a_bits_source_0; // @[OneHot.scala:58:35] wire [2047:0] _a_set_wo_ready_T; // @[OneHot.scala:58:35] assign _a_set_wo_ready_T = _GEN_2; // @[OneHot.scala:58:35] wire [2047:0] _a_set_T; // @[OneHot.scala:58:35] assign _a_set_T = _GEN_2; // @[OneHot.scala:58:35] assign a_set_wo_ready = _same_cycle_resp_T ? _a_set_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire _T_598 = _T_665 & a_first_1; // @[Decoupled.scala:51:35] assign a_set = _T_598 ? _a_set_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire [3:0] _a_opcodes_set_interm_T = {io_in_a_bits_opcode_0, 1'h0}; // @[Monitor.scala:36:7, :657:53] wire [3:0] _a_opcodes_set_interm_T_1 = {_a_opcodes_set_interm_T[3:1], 1'h1}; // @[Monitor.scala:657:{53,61}] assign a_opcodes_set_interm = _T_598 ? _a_opcodes_set_interm_T_1 : 4'h0; // @[Monitor.scala:646:40, :655:{25,70}, :657:{28,61}] wire [2:0] _a_sizes_set_interm_T = {io_in_a_bits_size_0, 1'h0}; // @[Monitor.scala:36:7, :658:51] wire [2:0] _a_sizes_set_interm_T_1 = {_a_sizes_set_interm_T[2:1], 1'h1}; // @[Monitor.scala:658:{51,59}] assign a_sizes_set_interm = _T_598 ? _a_sizes_set_interm_T_1 : 3'h0; // @[Monitor.scala:648:38, :655:{25,70}, :658:{28,59}] wire [13:0] _GEN_3 = {1'h0, io_in_a_bits_source_0, 2'h0}; // @[Monitor.scala:36:7, :659:79] wire [13:0] _a_opcodes_set_T; // @[Monitor.scala:659:79] assign _a_opcodes_set_T = _GEN_3; // @[Monitor.scala:659:79] wire [13:0] _a_sizes_set_T; // @[Monitor.scala:660:77] assign _a_sizes_set_T = _GEN_3; // @[Monitor.scala:659:79, :660:77] wire [16386:0] _a_opcodes_set_T_1 = {16383'h0, a_opcodes_set_interm} << _a_opcodes_set_T; // @[Monitor.scala:646:40, :659:{54,79}] assign a_opcodes_set = _T_598 ? 
_a_opcodes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:630:33, :655:{25,70}, :659:{28,54}] wire [16385:0] _a_sizes_set_T_1 = {16383'h0, a_sizes_set_interm} << _a_sizes_set_T; // @[Monitor.scala:648:38, :659:54, :660:{52,77}] assign a_sizes_set = _T_598 ? _a_sizes_set_T_1[4159:0] : 4160'h0; // @[Monitor.scala:632:31, :655:{25,70}, :660:{28,52}] wire [1039:0] d_clr; // @[Monitor.scala:664:34] wire [1039:0] d_clr_wo_ready; // @[Monitor.scala:665:34] wire [4159:0] d_opcodes_clr; // @[Monitor.scala:668:33] wire [4159:0] d_sizes_clr; // @[Monitor.scala:670:31] wire _GEN_4 = io_in_d_bits_opcode_0 == 3'h6; // @[Monitor.scala:36:7, :673:46] wire d_release_ack; // @[Monitor.scala:673:46] assign d_release_ack = _GEN_4; // @[Monitor.scala:673:46] wire d_release_ack_1; // @[Monitor.scala:783:46] assign d_release_ack_1 = _GEN_4; // @[Monitor.scala:673:46, :783:46] wire _T_644 = io_in_d_valid_0 & d_first_1; // @[Monitor.scala:36:7, :674:26] wire [2047:0] _GEN_5 = 2048'h1 << io_in_d_bits_source_0; // @[OneHot.scala:58:35] wire [2047:0] _d_clr_wo_ready_T; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T = _GEN_5; // @[OneHot.scala:58:35] wire [2047:0] _d_clr_T; // @[OneHot.scala:58:35] assign _d_clr_T = _GEN_5; // @[OneHot.scala:58:35] wire [2047:0] _d_clr_wo_ready_T_1; // @[OneHot.scala:58:35] assign _d_clr_wo_ready_T_1 = _GEN_5; // @[OneHot.scala:58:35] wire [2047:0] _d_clr_T_1; // @[OneHot.scala:58:35] assign _d_clr_T_1 = _GEN_5; // @[OneHot.scala:58:35] assign d_clr_wo_ready = _T_644 & ~d_release_ack ? _d_clr_wo_ready_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire _T_613 = _T_733 & d_first_1 & ~d_release_ack; // @[Decoupled.scala:51:35] assign d_clr = _T_613 ? _d_clr_T[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire [16398:0] _d_opcodes_clr_T_5 = 16399'hF << _d_opcodes_clr_T_4; // @[Monitor.scala:680:{76,101}] assign d_opcodes_clr = _T_613 ? _d_opcodes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:668:33, :678:{25,70,89}, :680:{21,76}] wire [16398:0] _d_sizes_clr_T_5 = 16399'hF << _d_sizes_clr_T_4; // @[Monitor.scala:681:{74,99}] assign d_sizes_clr = _T_613 ? 
_d_sizes_clr_T_5[4159:0] : 4160'h0; // @[Monitor.scala:670:31, :678:{25,70,89}, :681:{21,74}] wire _same_cycle_resp_T_1 = _same_cycle_resp_T; // @[Monitor.scala:684:{44,55}] wire _same_cycle_resp_T_2 = io_in_a_bits_source_0 == io_in_d_bits_source_0; // @[Monitor.scala:36:7, :684:113] wire same_cycle_resp = _same_cycle_resp_T_1 & _same_cycle_resp_T_2; // @[Monitor.scala:684:{55,88,113}] wire [1039:0] _inflight_T = inflight | a_set; // @[Monitor.scala:614:27, :626:34, :705:27] wire [1039:0] _inflight_T_1 = ~d_clr; // @[Monitor.scala:664:34, :705:38] wire [1039:0] _inflight_T_2 = _inflight_T & _inflight_T_1; // @[Monitor.scala:705:{27,36,38}] wire [4159:0] _inflight_opcodes_T = inflight_opcodes | a_opcodes_set; // @[Monitor.scala:616:35, :630:33, :706:43] wire [4159:0] _inflight_opcodes_T_1 = ~d_opcodes_clr; // @[Monitor.scala:668:33, :706:62] wire [4159:0] _inflight_opcodes_T_2 = _inflight_opcodes_T & _inflight_opcodes_T_1; // @[Monitor.scala:706:{43,60,62}] wire [4159:0] _inflight_sizes_T = inflight_sizes | a_sizes_set; // @[Monitor.scala:618:33, :632:31, :707:39] wire [4159:0] _inflight_sizes_T_1 = ~d_sizes_clr; // @[Monitor.scala:670:31, :707:56] wire [4159:0] _inflight_sizes_T_2 = _inflight_sizes_T & _inflight_sizes_T_1; // @[Monitor.scala:707:{39,54,56}] reg [31:0] watchdog; // @[Monitor.scala:709:27] wire [32:0] _watchdog_T = {1'h0, watchdog} + 33'h1; // @[Monitor.scala:709:27, :714:26] wire [31:0] _watchdog_T_1 = _watchdog_T[31:0]; // @[Monitor.scala:714:26] reg [1039:0] inflight_1; // @[Monitor.scala:726:35] wire [1039:0] _inflight_T_3 = inflight_1; // @[Monitor.scala:726:35, :814:35] reg [4159:0] inflight_opcodes_1; // @[Monitor.scala:727:35] wire [4159:0] _inflight_opcodes_T_3 = inflight_opcodes_1; // @[Monitor.scala:727:35, :815:43] reg [4159:0] inflight_sizes_1; // @[Monitor.scala:728:35] wire [4159:0] _inflight_sizes_T_3 = inflight_sizes_1; // @[Monitor.scala:728:35, :816:41] wire d_first_done_2 = _d_first_T_2; // @[Decoupled.scala:51:35] wire [2:0] _d_first_beats1_decode_T_7 = _d_first_beats1_decode_T_6[2:0]; // @[package.scala:243:{71,76}] wire [2:0] _d_first_beats1_decode_T_8 = ~_d_first_beats1_decode_T_7; // @[package.scala:243:{46,76}] reg d_first_counter_2; // @[Edges.scala:229:27] wire _d_first_last_T_4 = d_first_counter_2; // @[Edges.scala:229:27, :232:25] wire [1:0] _d_first_counter1_T_2 = {1'h0, d_first_counter_2} - 2'h1; // @[Edges.scala:229:27, :230:28] wire d_first_counter1_2 = _d_first_counter1_T_2[0]; // @[Edges.scala:230:28] wire d_first_2 = ~d_first_counter_2; // @[Edges.scala:229:27, :231:25] wire _d_first_count_T_2 = ~d_first_counter1_2; // @[Edges.scala:230:28, :234:27] wire _d_first_counter_T_2 = ~d_first_2 & d_first_counter1_2; // @[Edges.scala:230:28, :231:25, :236:21] wire [3:0] c_opcode_lookup; // @[Monitor.scala:747:35] wire [3:0] c_size_lookup; // @[Monitor.scala:748:35] wire [4159:0] _c_opcode_lookup_T_1 = inflight_opcodes_1 >> _c_opcode_lookup_T; // @[Monitor.scala:727:35, :749:{44,69}] wire [4159:0] _c_opcode_lookup_T_6 = {4156'h0, _c_opcode_lookup_T_1[3:0]}; // @[Monitor.scala:749:{44,97}] wire [4159:0] _c_opcode_lookup_T_7 = {1'h0, _c_opcode_lookup_T_6[4159:1]}; // @[Monitor.scala:749:{97,152}] assign c_opcode_lookup = _c_opcode_lookup_T_7[3:0]; // @[Monitor.scala:747:35, :749:{21,152}] wire [4159:0] _c_size_lookup_T_1 = inflight_sizes_1 >> _c_size_lookup_T; // @[Monitor.scala:728:35, :750:{42,67}] wire [4159:0] _c_size_lookup_T_6 = {4156'h0, _c_size_lookup_T_1[3:0]}; // @[Monitor.scala:750:{42,93}] wire [4159:0] _c_size_lookup_T_7 = {1'h0, 
_c_size_lookup_T_6[4159:1]}; // @[Monitor.scala:750:{93,146}] assign c_size_lookup = _c_size_lookup_T_7[3:0]; // @[Monitor.scala:748:35, :750:{21,146}] wire [1039:0] d_clr_1; // @[Monitor.scala:774:34] wire [1039:0] d_clr_wo_ready_1; // @[Monitor.scala:775:34] wire [4159:0] d_opcodes_clr_1; // @[Monitor.scala:776:34] wire [4159:0] d_sizes_clr_1; // @[Monitor.scala:777:34] wire _T_709 = io_in_d_valid_0 & d_first_2; // @[Monitor.scala:36:7, :784:26] assign d_clr_wo_ready_1 = _T_709 & d_release_ack_1 ? _d_clr_wo_ready_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire _T_691 = _T_733 & d_first_2 & d_release_ack_1; // @[Decoupled.scala:51:35] assign d_clr_1 = _T_691 ? _d_clr_T_1[1039:0] : 1040'h0; // @[OneHot.scala:58:35] wire [16398:0] _d_opcodes_clr_T_11 = 16399'hF << _d_opcodes_clr_T_10; // @[Monitor.scala:790:{76,101}] assign d_opcodes_clr_1 = _T_691 ? _d_opcodes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:776:34, :788:{25,70,88}, :790:{21,76}] wire [16398:0] _d_sizes_clr_T_11 = 16399'hF << _d_sizes_clr_T_10; // @[Monitor.scala:791:{74,99}] assign d_sizes_clr_1 = _T_691 ? _d_sizes_clr_T_11[4159:0] : 4160'h0; // @[Monitor.scala:777:34, :788:{25,70,88}, :791:{21,74}] wire _same_cycle_resp_T_8 = io_in_d_bits_source_0 == 11'h0; // @[Monitor.scala:36:7, :795:113] wire [1039:0] _inflight_T_4 = ~d_clr_1; // @[Monitor.scala:774:34, :814:46] wire [1039:0] _inflight_T_5 = _inflight_T_3 & _inflight_T_4; // @[Monitor.scala:814:{35,44,46}] wire [4159:0] _inflight_opcodes_T_4 = ~d_opcodes_clr_1; // @[Monitor.scala:776:34, :815:62] wire [4159:0] _inflight_opcodes_T_5 = _inflight_opcodes_T_3 & _inflight_opcodes_T_4; // @[Monitor.scala:815:{43,60,62}] wire [4159:0] _inflight_sizes_T_4 = ~d_sizes_clr_1; // @[Monitor.scala:777:34, :816:58] wire [4159:0] _inflight_sizes_T_5 = _inflight_sizes_T_3 & _inflight_sizes_T_4; // @[Monitor.scala:816:{41,56,58}] reg [31:0] watchdog_1; // @[Monitor.scala:818:27]
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
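  // Added commentary (not part of the upstream gemmini source): in the output-stationary
  // dataflow the two accumulators c1 and c2 are double-buffered, and io.in_control.propagate
  // selects their roles in the logic below:
  //   prop === PROPAGATE -> c1 is shifted by shift_offset, clipped to outputType and drained
  //                         on out_c while c2 keeps accumulating (c2 := a * b + c2);
  //                         c1 is reloaded from in_d for the next matmul
  //   prop === COMPUTE   -> the roles swap: c2 drains and reloads while c1 accumulates
  // Toggling propagate between back-to-back matmuls lets finished results flow out of the
  // mesh while the next matmul is already accumulating in the other register.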
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File recFNFromFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ object recFNFromFN { def apply(expWidth: Int, sigWidth: Int, in: Bits) = { val rawIn = rawFloatFromFN(expWidth, sigWidth, in) rawIn.sign ## (Mux(rawIn.isZero, 0.U(3.W), rawIn.sExp(expWidth, expWidth - 2)) | Mux(rawIn.isNaN, 1.U, 0.U)) ## rawIn.sExp(expWidth - 3, 0) ## rawIn.sig(sigWidth - 2, 0) } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
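        // Added worked example (commentary only, based on the expression below): for self = 11
        // (0b1011) and u = 2, point_five = self(1) = 1, zeros = ((self & 0b1) =/= 0) is true and
        // ones_digit = self(2) = 0, so r = 1 and the result is (11 >> 2) + 1 = 3, i.e. 2.75
        // rounds up. For self = 10 (0b1010) and u = 2, the discarded bits are exactly one half
        // (point_five = 1, zeros = false) and ones_digit = self(2) = 0, so r = 0 and 2.5 stays
        // at 2 -- ties resolve toward the even ones digit of the shifted result.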
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } } File fNFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ object fNFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits) = { val minNormExp = (BigInt(1)<<(expWidth - 1)) + 2 val rawIn = rawFloatFromRecFN(expWidth, sigWidth, in) val isSubnormal = rawIn.sExp < minNormExp.S val denormShiftDist = 1.U - rawIn.sExp(log2Up(sigWidth - 1) - 1, 0) val denormFract = ((rawIn.sig>>1)>>denormShiftDist)(sigWidth - 2, 0) val expOut = Mux(isSubnormal, 0.U, rawIn.sExp(expWidth - 1, 0) - ((BigInt(1)<<(expWidth - 1)) + 1).U ) | Fill(expWidth, rawIn.isNaN || rawIn.isInf) val fractOut = Mux(isSubnormal, denormFract, Mux(rawIn.isInf, 0.U, rawIn.sig(sigWidth - 2, 0)) ) Cat(rawIn.sign, expOut, fractOut) } } File rawFloatFromFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ object rawFloatFromFN { def apply(expWidth: Int, sigWidth: Int, in: Bits) = { val sign = in(expWidth + sigWidth - 1) val expIn = in(expWidth + sigWidth - 2, sigWidth - 1) val fractIn = in(sigWidth - 2, 0) val isZeroExpIn = (expIn === 0.U) val isZeroFractIn = (fractIn === 0.U) val normDist = countLeadingZeros(fractIn) val subnormFract = (fractIn << normDist) (sigWidth - 3, 0) << 1 val adjustedExp = Mux(isZeroExpIn, normDist ^ ((BigInt(1) << (expWidth + 1)) - 1).U, expIn ) + ((BigInt(1) << (expWidth - 1)).U | Mux(isZeroExpIn, 2.U, 1.U)) val isZero = isZeroExpIn && isZeroFractIn val isSpecial = adjustedExp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && !isZeroFractIn out.isInf := isSpecial && isZeroFractIn out.isZero := isZero out.sign := sign out.sExp := adjustedExp(expWidth, 0).zext out.sig := 0.U(1.W) ## !isZero ## Mux(isZeroExpIn, subnormFract, fractIn) out } } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. 
*----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
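(Aside, added for clarity and not part of the generation pair above: recFNFromFN widens the exponent by one bit so that zero and the special values can be recognized from the top exponent bits alone, and fNFromRecFN undoes that transformation. The sketch below is one minimal way to sanity-check that the two helpers are inverses; the module name RecodeRoundTrip and the binary32 parameters (expWidth = 8, sigWidth = 24) are illustrative choices, not something defined in this design.)

import chisel3._
import hardfloat.{recFNFromFN, fNFromRecFN}

// Recode a standard IEEE-754 single-precision word and convert it straight back.
// For ordinary encodings the output should mirror the input bit-for-bit.
class RecodeRoundTrip extends Module {
  val io = IO(new Bundle {
    val in  = Input(UInt(32.W))   // standard binary32: 1 sign + 8 exponent + 23 fraction bits
    val out = Output(UInt(32.W))
  })
  val rec = recFNFromFN(8, 24, io.in) // 33-bit recoded form: 1 sign + 9 exponent + 23 fraction bits
  io.out := fNFromRecFN(8, 24, rec)
}

In the gemmini Arithmetic ops above the recoding is wrapped around every individual operation, which is why the same rawFloatFromFN expansion appears repeatedly in the generated MacUnit that follows.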
module MacUnit_11( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [31:0] io_in_a_bits, // @[PE.scala:16:14] input [31:0] io_in_b_bits, // @[PE.scala:16:14] input [31:0] io_in_c_bits, // @[PE.scala:16:14] output [31:0] io_out_d_bits // @[PE.scala:16:14] ); wire io_out_d_self_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19] wire io_out_d_m2_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19] wire io_out_d_m1_rec_rawIn_isNaN; // @[rawFloatFromFN.scala:63:19] wire [32:0] _io_out_d_muladder_io_out; // @[Arithmetic.scala:376:30] wire [32:0] _io_out_d_m2_resizer_io_out; // @[Arithmetic.scala:369:32] wire [32:0] _io_out_d_m1_resizer_io_out; // @[Arithmetic.scala:362:32] wire [31:0] io_in_a_bits_0 = io_in_a_bits; // @[PE.scala:14:7] wire [31:0] io_in_b_bits_0 = io_in_b_bits; // @[PE.scala:14:7] wire [31:0] io_in_c_bits_0 = io_in_c_bits; // @[PE.scala:14:7] wire [31:0] io_out_d_out_bits; // @[Arithmetic.scala:387:23] wire [31:0] io_out_d_bits_0; // @[PE.scala:14:7] wire io_out_d_m1_rec_rawIn_sign = io_in_a_bits_0[31]; // @[rawFloatFromFN.scala:44:18] wire io_out_d_m1_rec_rawIn_sign_0 = io_out_d_m1_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19] wire [7:0] io_out_d_m1_rec_rawIn_expIn = io_in_a_bits_0[30:23]; // @[rawFloatFromFN.scala:45:19] wire [22:0] io_out_d_m1_rec_rawIn_fractIn = io_in_a_bits_0[22:0]; // @[rawFloatFromFN.scala:46:21] wire io_out_d_m1_rec_rawIn_isZeroExpIn = io_out_d_m1_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30] wire io_out_d_m1_rec_rawIn_isZeroFractIn = io_out_d_m1_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34] wire _io_out_d_m1_rec_rawIn_normDist_T = io_out_d_m1_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_1 = io_out_d_m1_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_2 = io_out_d_m1_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_3 = io_out_d_m1_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_4 = io_out_d_m1_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_5 = io_out_d_m1_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_6 = io_out_d_m1_rec_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_7 = io_out_d_m1_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_8 = io_out_d_m1_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_9 = io_out_d_m1_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_10 = io_out_d_m1_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_11 = io_out_d_m1_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_12 = io_out_d_m1_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_13 = io_out_d_m1_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_14 = io_out_d_m1_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_15 = io_out_d_m1_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_16 = io_out_d_m1_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21] wire 
_io_out_d_m1_rec_rawIn_normDist_T_17 = io_out_d_m1_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_18 = io_out_d_m1_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_19 = io_out_d_m1_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_20 = io_out_d_m1_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_21 = io_out_d_m1_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m1_rec_rawIn_normDist_T_22 = io_out_d_m1_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_23 = _io_out_d_m1_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_24 = _io_out_d_m1_rec_rawIn_normDist_T_2 ? 5'h14 : _io_out_d_m1_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_25 = _io_out_d_m1_rec_rawIn_normDist_T_3 ? 5'h13 : _io_out_d_m1_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_26 = _io_out_d_m1_rec_rawIn_normDist_T_4 ? 5'h12 : _io_out_d_m1_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_27 = _io_out_d_m1_rec_rawIn_normDist_T_5 ? 5'h11 : _io_out_d_m1_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_28 = _io_out_d_m1_rec_rawIn_normDist_T_6 ? 5'h10 : _io_out_d_m1_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_29 = _io_out_d_m1_rec_rawIn_normDist_T_7 ? 5'hF : _io_out_d_m1_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_30 = _io_out_d_m1_rec_rawIn_normDist_T_8 ? 5'hE : _io_out_d_m1_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_31 = _io_out_d_m1_rec_rawIn_normDist_T_9 ? 5'hD : _io_out_d_m1_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_32 = _io_out_d_m1_rec_rawIn_normDist_T_10 ? 5'hC : _io_out_d_m1_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_33 = _io_out_d_m1_rec_rawIn_normDist_T_11 ? 5'hB : _io_out_d_m1_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_34 = _io_out_d_m1_rec_rawIn_normDist_T_12 ? 5'hA : _io_out_d_m1_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_35 = _io_out_d_m1_rec_rawIn_normDist_T_13 ? 5'h9 : _io_out_d_m1_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_36 = _io_out_d_m1_rec_rawIn_normDist_T_14 ? 5'h8 : _io_out_d_m1_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_37 = _io_out_d_m1_rec_rawIn_normDist_T_15 ? 5'h7 : _io_out_d_m1_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_38 = _io_out_d_m1_rec_rawIn_normDist_T_16 ? 5'h6 : _io_out_d_m1_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_39 = _io_out_d_m1_rec_rawIn_normDist_T_17 ? 5'h5 : _io_out_d_m1_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_40 = _io_out_d_m1_rec_rawIn_normDist_T_18 ? 5'h4 : _io_out_d_m1_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_41 = _io_out_d_m1_rec_rawIn_normDist_T_19 ? 
5'h3 : _io_out_d_m1_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_42 = _io_out_d_m1_rec_rawIn_normDist_T_20 ? 5'h2 : _io_out_d_m1_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m1_rec_rawIn_normDist_T_43 = _io_out_d_m1_rec_rawIn_normDist_T_21 ? 5'h1 : _io_out_d_m1_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70] wire [4:0] io_out_d_m1_rec_rawIn_normDist = _io_out_d_m1_rec_rawIn_normDist_T_22 ? 5'h0 : _io_out_d_m1_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70] wire [53:0] _io_out_d_m1_rec_rawIn_subnormFract_T = {31'h0, io_out_d_m1_rec_rawIn_fractIn} << io_out_d_m1_rec_rawIn_normDist; // @[Mux.scala:50:70] wire [21:0] _io_out_d_m1_rec_rawIn_subnormFract_T_1 = _io_out_d_m1_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}] wire [22:0] io_out_d_m1_rec_rawIn_subnormFract = {_io_out_d_m1_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}] wire [8:0] _io_out_d_m1_rec_rawIn_adjustedExp_T = {4'hF, ~io_out_d_m1_rec_rawIn_normDist}; // @[Mux.scala:50:70] wire [8:0] _io_out_d_m1_rec_rawIn_adjustedExp_T_1 = io_out_d_m1_rec_rawIn_isZeroExpIn ? _io_out_d_m1_rec_rawIn_adjustedExp_T : {1'h0, io_out_d_m1_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18] wire [1:0] _io_out_d_m1_rec_rawIn_adjustedExp_T_2 = io_out_d_m1_rec_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14] wire [7:0] _io_out_d_m1_rec_rawIn_adjustedExp_T_3 = {6'h20, _io_out_d_m1_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}] wire [9:0] _io_out_d_m1_rec_rawIn_adjustedExp_T_4 = {1'h0, _io_out_d_m1_rec_rawIn_adjustedExp_T_1} + {2'h0, _io_out_d_m1_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9] wire [8:0] io_out_d_m1_rec_rawIn_adjustedExp = _io_out_d_m1_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9] wire [8:0] _io_out_d_m1_rec_rawIn_out_sExp_T = io_out_d_m1_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28] wire io_out_d_m1_rec_rawIn_isZero = io_out_d_m1_rec_rawIn_isZeroExpIn & io_out_d_m1_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30] wire io_out_d_m1_rec_rawIn_isZero_0 = io_out_d_m1_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19] wire [1:0] _io_out_d_m1_rec_rawIn_isSpecial_T = io_out_d_m1_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32] wire io_out_d_m1_rec_rawIn_isSpecial = &_io_out_d_m1_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}] wire _io_out_d_m1_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28] wire _io_out_d_m1_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28] wire _io_out_d_m1_rec_T_2 = io_out_d_m1_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20] wire [9:0] _io_out_d_m1_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42] wire [24:0] _io_out_d_m1_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27] wire io_out_d_m1_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19] wire [9:0] io_out_d_m1_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19] wire [24:0] io_out_d_m1_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19] wire _io_out_d_m1_rec_rawIn_out_isNaN_T = ~io_out_d_m1_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31] assign _io_out_d_m1_rec_rawIn_out_isNaN_T_1 = io_out_d_m1_rec_rawIn_isSpecial & _io_out_d_m1_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}] assign io_out_d_m1_rec_rawIn_isNaN = _io_out_d_m1_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28] assign 
_io_out_d_m1_rec_rawIn_out_isInf_T = io_out_d_m1_rec_rawIn_isSpecial & io_out_d_m1_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28] assign io_out_d_m1_rec_rawIn_isInf = _io_out_d_m1_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28] assign _io_out_d_m1_rec_rawIn_out_sExp_T_1 = {1'h0, _io_out_d_m1_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}] assign io_out_d_m1_rec_rawIn_sExp = _io_out_d_m1_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42] wire _io_out_d_m1_rec_rawIn_out_sig_T = ~io_out_d_m1_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19] wire [1:0] _io_out_d_m1_rec_rawIn_out_sig_T_1 = {1'h0, _io_out_d_m1_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}] wire [22:0] _io_out_d_m1_rec_rawIn_out_sig_T_2 = io_out_d_m1_rec_rawIn_isZeroExpIn ? io_out_d_m1_rec_rawIn_subnormFract : io_out_d_m1_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33] assign _io_out_d_m1_rec_rawIn_out_sig_T_3 = {_io_out_d_m1_rec_rawIn_out_sig_T_1, _io_out_d_m1_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}] assign io_out_d_m1_rec_rawIn_sig = _io_out_d_m1_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27] wire [2:0] _io_out_d_m1_rec_T = io_out_d_m1_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50] wire [2:0] _io_out_d_m1_rec_T_1 = io_out_d_m1_rec_rawIn_isZero_0 ? 3'h0 : _io_out_d_m1_rec_T; // @[recFNFromFN.scala:48:{15,50}] wire [2:0] _io_out_d_m1_rec_T_3 = {_io_out_d_m1_rec_T_1[2:1], _io_out_d_m1_rec_T_1[0] | _io_out_d_m1_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20] wire [3:0] _io_out_d_m1_rec_T_4 = {io_out_d_m1_rec_rawIn_sign_0, _io_out_d_m1_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76] wire [5:0] _io_out_d_m1_rec_T_5 = io_out_d_m1_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23] wire [9:0] _io_out_d_m1_rec_T_6 = {_io_out_d_m1_rec_T_4, _io_out_d_m1_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23] wire [22:0] _io_out_d_m1_rec_T_7 = io_out_d_m1_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22] wire [32:0] io_out_d_m1_rec = {_io_out_d_m1_rec_T_6, _io_out_d_m1_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22] wire io_out_d_m2_rec_rawIn_sign = io_in_b_bits_0[31]; // @[rawFloatFromFN.scala:44:18] wire io_out_d_m2_rec_rawIn_sign_0 = io_out_d_m2_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19] wire [7:0] io_out_d_m2_rec_rawIn_expIn = io_in_b_bits_0[30:23]; // @[rawFloatFromFN.scala:45:19] wire [22:0] io_out_d_m2_rec_rawIn_fractIn = io_in_b_bits_0[22:0]; // @[rawFloatFromFN.scala:46:21] wire io_out_d_m2_rec_rawIn_isZeroExpIn = io_out_d_m2_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30] wire io_out_d_m2_rec_rawIn_isZeroFractIn = io_out_d_m2_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34] wire _io_out_d_m2_rec_rawIn_normDist_T = io_out_d_m2_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_1 = io_out_d_m2_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_2 = io_out_d_m2_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_3 = io_out_d_m2_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_4 = io_out_d_m2_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_5 = io_out_d_m2_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_6 = io_out_d_m2_rec_rawIn_fractIn[6]; // 
@[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_7 = io_out_d_m2_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_8 = io_out_d_m2_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_9 = io_out_d_m2_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_10 = io_out_d_m2_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_11 = io_out_d_m2_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_12 = io_out_d_m2_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_13 = io_out_d_m2_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_14 = io_out_d_m2_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_15 = io_out_d_m2_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_16 = io_out_d_m2_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_17 = io_out_d_m2_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_18 = io_out_d_m2_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_19 = io_out_d_m2_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_20 = io_out_d_m2_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_21 = io_out_d_m2_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_m2_rec_rawIn_normDist_T_22 = io_out_d_m2_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_23 = _io_out_d_m2_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_24 = _io_out_d_m2_rec_rawIn_normDist_T_2 ? 5'h14 : _io_out_d_m2_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_25 = _io_out_d_m2_rec_rawIn_normDist_T_3 ? 5'h13 : _io_out_d_m2_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_26 = _io_out_d_m2_rec_rawIn_normDist_T_4 ? 5'h12 : _io_out_d_m2_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_27 = _io_out_d_m2_rec_rawIn_normDist_T_5 ? 5'h11 : _io_out_d_m2_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_28 = _io_out_d_m2_rec_rawIn_normDist_T_6 ? 5'h10 : _io_out_d_m2_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_29 = _io_out_d_m2_rec_rawIn_normDist_T_7 ? 5'hF : _io_out_d_m2_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_30 = _io_out_d_m2_rec_rawIn_normDist_T_8 ? 5'hE : _io_out_d_m2_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_31 = _io_out_d_m2_rec_rawIn_normDist_T_9 ? 5'hD : _io_out_d_m2_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_32 = _io_out_d_m2_rec_rawIn_normDist_T_10 ? 5'hC : _io_out_d_m2_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_33 = _io_out_d_m2_rec_rawIn_normDist_T_11 ? 
5'hB : _io_out_d_m2_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_34 = _io_out_d_m2_rec_rawIn_normDist_T_12 ? 5'hA : _io_out_d_m2_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_35 = _io_out_d_m2_rec_rawIn_normDist_T_13 ? 5'h9 : _io_out_d_m2_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_36 = _io_out_d_m2_rec_rawIn_normDist_T_14 ? 5'h8 : _io_out_d_m2_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_37 = _io_out_d_m2_rec_rawIn_normDist_T_15 ? 5'h7 : _io_out_d_m2_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_38 = _io_out_d_m2_rec_rawIn_normDist_T_16 ? 5'h6 : _io_out_d_m2_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_39 = _io_out_d_m2_rec_rawIn_normDist_T_17 ? 5'h5 : _io_out_d_m2_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_40 = _io_out_d_m2_rec_rawIn_normDist_T_18 ? 5'h4 : _io_out_d_m2_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_41 = _io_out_d_m2_rec_rawIn_normDist_T_19 ? 5'h3 : _io_out_d_m2_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_42 = _io_out_d_m2_rec_rawIn_normDist_T_20 ? 5'h2 : _io_out_d_m2_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70] wire [4:0] _io_out_d_m2_rec_rawIn_normDist_T_43 = _io_out_d_m2_rec_rawIn_normDist_T_21 ? 5'h1 : _io_out_d_m2_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70] wire [4:0] io_out_d_m2_rec_rawIn_normDist = _io_out_d_m2_rec_rawIn_normDist_T_22 ? 5'h0 : _io_out_d_m2_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70] wire [53:0] _io_out_d_m2_rec_rawIn_subnormFract_T = {31'h0, io_out_d_m2_rec_rawIn_fractIn} << io_out_d_m2_rec_rawIn_normDist; // @[Mux.scala:50:70] wire [21:0] _io_out_d_m2_rec_rawIn_subnormFract_T_1 = _io_out_d_m2_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}] wire [22:0] io_out_d_m2_rec_rawIn_subnormFract = {_io_out_d_m2_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}] wire [8:0] _io_out_d_m2_rec_rawIn_adjustedExp_T = {4'hF, ~io_out_d_m2_rec_rawIn_normDist}; // @[Mux.scala:50:70] wire [8:0] _io_out_d_m2_rec_rawIn_adjustedExp_T_1 = io_out_d_m2_rec_rawIn_isZeroExpIn ? _io_out_d_m2_rec_rawIn_adjustedExp_T : {1'h0, io_out_d_m2_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18] wire [1:0] _io_out_d_m2_rec_rawIn_adjustedExp_T_2 = io_out_d_m2_rec_rawIn_isZeroExpIn ? 
2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14] wire [7:0] _io_out_d_m2_rec_rawIn_adjustedExp_T_3 = {6'h20, _io_out_d_m2_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}] wire [9:0] _io_out_d_m2_rec_rawIn_adjustedExp_T_4 = {1'h0, _io_out_d_m2_rec_rawIn_adjustedExp_T_1} + {2'h0, _io_out_d_m2_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9] wire [8:0] io_out_d_m2_rec_rawIn_adjustedExp = _io_out_d_m2_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9] wire [8:0] _io_out_d_m2_rec_rawIn_out_sExp_T = io_out_d_m2_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28] wire io_out_d_m2_rec_rawIn_isZero = io_out_d_m2_rec_rawIn_isZeroExpIn & io_out_d_m2_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30] wire io_out_d_m2_rec_rawIn_isZero_0 = io_out_d_m2_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19] wire [1:0] _io_out_d_m2_rec_rawIn_isSpecial_T = io_out_d_m2_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32] wire io_out_d_m2_rec_rawIn_isSpecial = &_io_out_d_m2_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}] wire _io_out_d_m2_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28] wire _io_out_d_m2_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28] wire _io_out_d_m2_rec_T_2 = io_out_d_m2_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20] wire [9:0] _io_out_d_m2_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42] wire [24:0] _io_out_d_m2_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27] wire io_out_d_m2_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19] wire [9:0] io_out_d_m2_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19] wire [24:0] io_out_d_m2_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19] wire _io_out_d_m2_rec_rawIn_out_isNaN_T = ~io_out_d_m2_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31] assign _io_out_d_m2_rec_rawIn_out_isNaN_T_1 = io_out_d_m2_rec_rawIn_isSpecial & _io_out_d_m2_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}] assign io_out_d_m2_rec_rawIn_isNaN = _io_out_d_m2_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28] assign _io_out_d_m2_rec_rawIn_out_isInf_T = io_out_d_m2_rec_rawIn_isSpecial & io_out_d_m2_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28] assign io_out_d_m2_rec_rawIn_isInf = _io_out_d_m2_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28] assign _io_out_d_m2_rec_rawIn_out_sExp_T_1 = {1'h0, _io_out_d_m2_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}] assign io_out_d_m2_rec_rawIn_sExp = _io_out_d_m2_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42] wire _io_out_d_m2_rec_rawIn_out_sig_T = ~io_out_d_m2_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19] wire [1:0] _io_out_d_m2_rec_rawIn_out_sig_T_1 = {1'h0, _io_out_d_m2_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}] wire [22:0] _io_out_d_m2_rec_rawIn_out_sig_T_2 = io_out_d_m2_rec_rawIn_isZeroExpIn ? io_out_d_m2_rec_rawIn_subnormFract : io_out_d_m2_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33] assign _io_out_d_m2_rec_rawIn_out_sig_T_3 = {_io_out_d_m2_rec_rawIn_out_sig_T_1, _io_out_d_m2_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}] assign io_out_d_m2_rec_rawIn_sig = _io_out_d_m2_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27] wire [2:0] _io_out_d_m2_rec_T = io_out_d_m2_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50] wire [2:0] _io_out_d_m2_rec_T_1 = io_out_d_m2_rec_rawIn_isZero_0 ? 
3'h0 : _io_out_d_m2_rec_T; // @[recFNFromFN.scala:48:{15,50}] wire [2:0] _io_out_d_m2_rec_T_3 = {_io_out_d_m2_rec_T_1[2:1], _io_out_d_m2_rec_T_1[0] | _io_out_d_m2_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20] wire [3:0] _io_out_d_m2_rec_T_4 = {io_out_d_m2_rec_rawIn_sign_0, _io_out_d_m2_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76] wire [5:0] _io_out_d_m2_rec_T_5 = io_out_d_m2_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23] wire [9:0] _io_out_d_m2_rec_T_6 = {_io_out_d_m2_rec_T_4, _io_out_d_m2_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23] wire [22:0] _io_out_d_m2_rec_T_7 = io_out_d_m2_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22] wire [32:0] io_out_d_m2_rec = {_io_out_d_m2_rec_T_6, _io_out_d_m2_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22] wire io_out_d_self_rec_rawIn_sign = io_in_c_bits_0[31]; // @[rawFloatFromFN.scala:44:18] wire io_out_d_self_rec_rawIn_sign_0 = io_out_d_self_rec_rawIn_sign; // @[rawFloatFromFN.scala:44:18, :63:19] wire [7:0] io_out_d_self_rec_rawIn_expIn = io_in_c_bits_0[30:23]; // @[rawFloatFromFN.scala:45:19] wire [22:0] io_out_d_self_rec_rawIn_fractIn = io_in_c_bits_0[22:0]; // @[rawFloatFromFN.scala:46:21] wire io_out_d_self_rec_rawIn_isZeroExpIn = io_out_d_self_rec_rawIn_expIn == 8'h0; // @[rawFloatFromFN.scala:45:19, :48:30] wire io_out_d_self_rec_rawIn_isZeroFractIn = io_out_d_self_rec_rawIn_fractIn == 23'h0; // @[rawFloatFromFN.scala:46:21, :49:34] wire _io_out_d_self_rec_rawIn_normDist_T = io_out_d_self_rec_rawIn_fractIn[0]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_1 = io_out_d_self_rec_rawIn_fractIn[1]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_2 = io_out_d_self_rec_rawIn_fractIn[2]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_3 = io_out_d_self_rec_rawIn_fractIn[3]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_4 = io_out_d_self_rec_rawIn_fractIn[4]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_5 = io_out_d_self_rec_rawIn_fractIn[5]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_6 = io_out_d_self_rec_rawIn_fractIn[6]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_7 = io_out_d_self_rec_rawIn_fractIn[7]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_8 = io_out_d_self_rec_rawIn_fractIn[8]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_9 = io_out_d_self_rec_rawIn_fractIn[9]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_10 = io_out_d_self_rec_rawIn_fractIn[10]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_11 = io_out_d_self_rec_rawIn_fractIn[11]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_12 = io_out_d_self_rec_rawIn_fractIn[12]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_13 = io_out_d_self_rec_rawIn_fractIn[13]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_14 = io_out_d_self_rec_rawIn_fractIn[14]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_15 = io_out_d_self_rec_rawIn_fractIn[15]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_16 = io_out_d_self_rec_rawIn_fractIn[16]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_17 = io_out_d_self_rec_rawIn_fractIn[17]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_18 = 
io_out_d_self_rec_rawIn_fractIn[18]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_19 = io_out_d_self_rec_rawIn_fractIn[19]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_20 = io_out_d_self_rec_rawIn_fractIn[20]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_21 = io_out_d_self_rec_rawIn_fractIn[21]; // @[rawFloatFromFN.scala:46:21] wire _io_out_d_self_rec_rawIn_normDist_T_22 = io_out_d_self_rec_rawIn_fractIn[22]; // @[rawFloatFromFN.scala:46:21] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_23 = _io_out_d_self_rec_rawIn_normDist_T_1 ? 5'h15 : 5'h16; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_24 = _io_out_d_self_rec_rawIn_normDist_T_2 ? 5'h14 : _io_out_d_self_rec_rawIn_normDist_T_23; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_25 = _io_out_d_self_rec_rawIn_normDist_T_3 ? 5'h13 : _io_out_d_self_rec_rawIn_normDist_T_24; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_26 = _io_out_d_self_rec_rawIn_normDist_T_4 ? 5'h12 : _io_out_d_self_rec_rawIn_normDist_T_25; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_27 = _io_out_d_self_rec_rawIn_normDist_T_5 ? 5'h11 : _io_out_d_self_rec_rawIn_normDist_T_26; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_28 = _io_out_d_self_rec_rawIn_normDist_T_6 ? 5'h10 : _io_out_d_self_rec_rawIn_normDist_T_27; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_29 = _io_out_d_self_rec_rawIn_normDist_T_7 ? 5'hF : _io_out_d_self_rec_rawIn_normDist_T_28; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_30 = _io_out_d_self_rec_rawIn_normDist_T_8 ? 5'hE : _io_out_d_self_rec_rawIn_normDist_T_29; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_31 = _io_out_d_self_rec_rawIn_normDist_T_9 ? 5'hD : _io_out_d_self_rec_rawIn_normDist_T_30; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_32 = _io_out_d_self_rec_rawIn_normDist_T_10 ? 5'hC : _io_out_d_self_rec_rawIn_normDist_T_31; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_33 = _io_out_d_self_rec_rawIn_normDist_T_11 ? 5'hB : _io_out_d_self_rec_rawIn_normDist_T_32; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_34 = _io_out_d_self_rec_rawIn_normDist_T_12 ? 5'hA : _io_out_d_self_rec_rawIn_normDist_T_33; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_35 = _io_out_d_self_rec_rawIn_normDist_T_13 ? 5'h9 : _io_out_d_self_rec_rawIn_normDist_T_34; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_36 = _io_out_d_self_rec_rawIn_normDist_T_14 ? 5'h8 : _io_out_d_self_rec_rawIn_normDist_T_35; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_37 = _io_out_d_self_rec_rawIn_normDist_T_15 ? 5'h7 : _io_out_d_self_rec_rawIn_normDist_T_36; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_38 = _io_out_d_self_rec_rawIn_normDist_T_16 ? 5'h6 : _io_out_d_self_rec_rawIn_normDist_T_37; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_39 = _io_out_d_self_rec_rawIn_normDist_T_17 ? 5'h5 : _io_out_d_self_rec_rawIn_normDist_T_38; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_40 = _io_out_d_self_rec_rawIn_normDist_T_18 ? 5'h4 : _io_out_d_self_rec_rawIn_normDist_T_39; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_41 = _io_out_d_self_rec_rawIn_normDist_T_19 ? 
5'h3 : _io_out_d_self_rec_rawIn_normDist_T_40; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_42 = _io_out_d_self_rec_rawIn_normDist_T_20 ? 5'h2 : _io_out_d_self_rec_rawIn_normDist_T_41; // @[Mux.scala:50:70] wire [4:0] _io_out_d_self_rec_rawIn_normDist_T_43 = _io_out_d_self_rec_rawIn_normDist_T_21 ? 5'h1 : _io_out_d_self_rec_rawIn_normDist_T_42; // @[Mux.scala:50:70] wire [4:0] io_out_d_self_rec_rawIn_normDist = _io_out_d_self_rec_rawIn_normDist_T_22 ? 5'h0 : _io_out_d_self_rec_rawIn_normDist_T_43; // @[Mux.scala:50:70] wire [53:0] _io_out_d_self_rec_rawIn_subnormFract_T = {31'h0, io_out_d_self_rec_rawIn_fractIn} << io_out_d_self_rec_rawIn_normDist; // @[Mux.scala:50:70] wire [21:0] _io_out_d_self_rec_rawIn_subnormFract_T_1 = _io_out_d_self_rec_rawIn_subnormFract_T[21:0]; // @[rawFloatFromFN.scala:52:{33,46}] wire [22:0] io_out_d_self_rec_rawIn_subnormFract = {_io_out_d_self_rec_rawIn_subnormFract_T_1, 1'h0}; // @[rawFloatFromFN.scala:52:{46,64}] wire [8:0] _io_out_d_self_rec_rawIn_adjustedExp_T = {4'hF, ~io_out_d_self_rec_rawIn_normDist}; // @[Mux.scala:50:70] wire [8:0] _io_out_d_self_rec_rawIn_adjustedExp_T_1 = io_out_d_self_rec_rawIn_isZeroExpIn ? _io_out_d_self_rec_rawIn_adjustedExp_T : {1'h0, io_out_d_self_rec_rawIn_expIn}; // @[rawFloatFromFN.scala:45:19, :48:30, :54:10, :55:18] wire [1:0] _io_out_d_self_rec_rawIn_adjustedExp_T_2 = io_out_d_self_rec_rawIn_isZeroExpIn ? 2'h2 : 2'h1; // @[rawFloatFromFN.scala:48:30, :58:14] wire [7:0] _io_out_d_self_rec_rawIn_adjustedExp_T_3 = {6'h20, _io_out_d_self_rec_rawIn_adjustedExp_T_2}; // @[rawFloatFromFN.scala:58:{9,14}] wire [9:0] _io_out_d_self_rec_rawIn_adjustedExp_T_4 = {1'h0, _io_out_d_self_rec_rawIn_adjustedExp_T_1} + {2'h0, _io_out_d_self_rec_rawIn_adjustedExp_T_3}; // @[rawFloatFromFN.scala:54:10, :57:9, :58:9] wire [8:0] io_out_d_self_rec_rawIn_adjustedExp = _io_out_d_self_rec_rawIn_adjustedExp_T_4[8:0]; // @[rawFloatFromFN.scala:57:9] wire [8:0] _io_out_d_self_rec_rawIn_out_sExp_T = io_out_d_self_rec_rawIn_adjustedExp; // @[rawFloatFromFN.scala:57:9, :68:28] wire io_out_d_self_rec_rawIn_isZero = io_out_d_self_rec_rawIn_isZeroExpIn & io_out_d_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:48:30, :49:34, :60:30] wire io_out_d_self_rec_rawIn_isZero_0 = io_out_d_self_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :63:19] wire [1:0] _io_out_d_self_rec_rawIn_isSpecial_T = io_out_d_self_rec_rawIn_adjustedExp[8:7]; // @[rawFloatFromFN.scala:57:9, :61:32] wire io_out_d_self_rec_rawIn_isSpecial = &_io_out_d_self_rec_rawIn_isSpecial_T; // @[rawFloatFromFN.scala:61:{32,57}] wire _io_out_d_self_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:64:28] wire _io_out_d_self_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:65:28] wire _io_out_d_self_rec_T_2 = io_out_d_self_rec_rawIn_isNaN; // @[recFNFromFN.scala:49:20] wire [9:0] _io_out_d_self_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:68:42] wire [24:0] _io_out_d_self_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:70:27] wire io_out_d_self_rec_rawIn_isInf; // @[rawFloatFromFN.scala:63:19] wire [9:0] io_out_d_self_rec_rawIn_sExp; // @[rawFloatFromFN.scala:63:19] wire [24:0] io_out_d_self_rec_rawIn_sig; // @[rawFloatFromFN.scala:63:19] wire _io_out_d_self_rec_rawIn_out_isNaN_T = ~io_out_d_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :64:31] assign _io_out_d_self_rec_rawIn_out_isNaN_T_1 = io_out_d_self_rec_rawIn_isSpecial & _io_out_d_self_rec_rawIn_out_isNaN_T; // @[rawFloatFromFN.scala:61:57, :64:{28,31}] assign 
io_out_d_self_rec_rawIn_isNaN = _io_out_d_self_rec_rawIn_out_isNaN_T_1; // @[rawFloatFromFN.scala:63:19, :64:28] assign _io_out_d_self_rec_rawIn_out_isInf_T = io_out_d_self_rec_rawIn_isSpecial & io_out_d_self_rec_rawIn_isZeroFractIn; // @[rawFloatFromFN.scala:49:34, :61:57, :65:28] assign io_out_d_self_rec_rawIn_isInf = _io_out_d_self_rec_rawIn_out_isInf_T; // @[rawFloatFromFN.scala:63:19, :65:28] assign _io_out_d_self_rec_rawIn_out_sExp_T_1 = {1'h0, _io_out_d_self_rec_rawIn_out_sExp_T}; // @[rawFloatFromFN.scala:68:{28,42}] assign io_out_d_self_rec_rawIn_sExp = _io_out_d_self_rec_rawIn_out_sExp_T_1; // @[rawFloatFromFN.scala:63:19, :68:42] wire _io_out_d_self_rec_rawIn_out_sig_T = ~io_out_d_self_rec_rawIn_isZero; // @[rawFloatFromFN.scala:60:30, :70:19] wire [1:0] _io_out_d_self_rec_rawIn_out_sig_T_1 = {1'h0, _io_out_d_self_rec_rawIn_out_sig_T}; // @[rawFloatFromFN.scala:70:{16,19}] wire [22:0] _io_out_d_self_rec_rawIn_out_sig_T_2 = io_out_d_self_rec_rawIn_isZeroExpIn ? io_out_d_self_rec_rawIn_subnormFract : io_out_d_self_rec_rawIn_fractIn; // @[rawFloatFromFN.scala:46:21, :48:30, :52:64, :70:33] assign _io_out_d_self_rec_rawIn_out_sig_T_3 = {_io_out_d_self_rec_rawIn_out_sig_T_1, _io_out_d_self_rec_rawIn_out_sig_T_2}; // @[rawFloatFromFN.scala:70:{16,27,33}] assign io_out_d_self_rec_rawIn_sig = _io_out_d_self_rec_rawIn_out_sig_T_3; // @[rawFloatFromFN.scala:63:19, :70:27] wire [2:0] _io_out_d_self_rec_T = io_out_d_self_rec_rawIn_sExp[8:6]; // @[recFNFromFN.scala:48:50] wire [2:0] _io_out_d_self_rec_T_1 = io_out_d_self_rec_rawIn_isZero_0 ? 3'h0 : _io_out_d_self_rec_T; // @[recFNFromFN.scala:48:{15,50}] wire [2:0] _io_out_d_self_rec_T_3 = {_io_out_d_self_rec_T_1[2:1], _io_out_d_self_rec_T_1[0] | _io_out_d_self_rec_T_2}; // @[recFNFromFN.scala:48:{15,76}, :49:20] wire [3:0] _io_out_d_self_rec_T_4 = {io_out_d_self_rec_rawIn_sign_0, _io_out_d_self_rec_T_3}; // @[recFNFromFN.scala:47:20, :48:76] wire [5:0] _io_out_d_self_rec_T_5 = io_out_d_self_rec_rawIn_sExp[5:0]; // @[recFNFromFN.scala:50:23] wire [9:0] _io_out_d_self_rec_T_6 = {_io_out_d_self_rec_T_4, _io_out_d_self_rec_T_5}; // @[recFNFromFN.scala:47:20, :49:45, :50:23] wire [22:0] _io_out_d_self_rec_T_7 = io_out_d_self_rec_rawIn_sig[22:0]; // @[recFNFromFN.scala:51:22] wire [32:0] io_out_d_self_rec = {_io_out_d_self_rec_T_6, _io_out_d_self_rec_T_7}; // @[recFNFromFN.scala:49:45, :50:41, :51:22] wire [31:0] _io_out_d_out_bits_T; // @[fNFromRecFN.scala:66:12] assign io_out_d_bits_0 = io_out_d_out_bits; // @[PE.scala:14:7] wire [8:0] io_out_d_out_bits_rawIn_exp = _io_out_d_muladder_io_out[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _io_out_d_out_bits_rawIn_isZero_T = io_out_d_out_bits_rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire io_out_d_out_bits_rawIn_isZero = _io_out_d_out_bits_rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire io_out_d_out_bits_rawIn_isZero_0 = io_out_d_out_bits_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _io_out_d_out_bits_rawIn_isSpecial_T = io_out_d_out_bits_rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire io_out_d_out_bits_rawIn_isSpecial = &_io_out_d_out_bits_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _io_out_d_out_bits_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _io_out_d_out_bits_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _io_out_d_out_bits_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _io_out_d_out_bits_rawIn_out_sExp_T; // 
@[rawFloatFromRecFN.scala:60:27] wire [24:0] _io_out_d_out_bits_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire io_out_d_out_bits_rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire io_out_d_out_bits_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire io_out_d_out_bits_rawIn_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] io_out_d_out_bits_rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] io_out_d_out_bits_rawIn_sig; // @[rawFloatFromRecFN.scala:55:23] wire _io_out_d_out_bits_rawIn_out_isNaN_T = io_out_d_out_bits_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _io_out_d_out_bits_rawIn_out_isInf_T = io_out_d_out_bits_rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _io_out_d_out_bits_rawIn_out_isNaN_T_1 = io_out_d_out_bits_rawIn_isSpecial & _io_out_d_out_bits_rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign io_out_d_out_bits_rawIn_isNaN = _io_out_d_out_bits_rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _io_out_d_out_bits_rawIn_out_isInf_T_1 = ~_io_out_d_out_bits_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _io_out_d_out_bits_rawIn_out_isInf_T_2 = io_out_d_out_bits_rawIn_isSpecial & _io_out_d_out_bits_rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign io_out_d_out_bits_rawIn_isInf = _io_out_d_out_bits_rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _io_out_d_out_bits_rawIn_out_sign_T = _io_out_d_muladder_io_out[32]; // @[rawFloatFromRecFN.scala:59:25] assign io_out_d_out_bits_rawIn_sign = _io_out_d_out_bits_rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _io_out_d_out_bits_rawIn_out_sExp_T = {1'h0, io_out_d_out_bits_rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign io_out_d_out_bits_rawIn_sExp = _io_out_d_out_bits_rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _io_out_d_out_bits_rawIn_out_sig_T = ~io_out_d_out_bits_rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _io_out_d_out_bits_rawIn_out_sig_T_1 = {1'h0, _io_out_d_out_bits_rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _io_out_d_out_bits_rawIn_out_sig_T_2 = _io_out_d_muladder_io_out[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _io_out_d_out_bits_rawIn_out_sig_T_3 = {_io_out_d_out_bits_rawIn_out_sig_T_1, _io_out_d_out_bits_rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign io_out_d_out_bits_rawIn_sig = _io_out_d_out_bits_rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] wire io_out_d_out_bits_isSubnormal = $signed(io_out_d_out_bits_rawIn_sExp) < 10'sh82; // @[rawFloatFromRecFN.scala:55:23] wire [4:0] _io_out_d_out_bits_denormShiftDist_T = io_out_d_out_bits_rawIn_sExp[4:0]; // @[rawFloatFromRecFN.scala:55:23] wire [5:0] _io_out_d_out_bits_denormShiftDist_T_1 = 6'h1 - {1'h0, _io_out_d_out_bits_denormShiftDist_T}; // @[fNFromRecFN.scala:52:{35,47}] wire [4:0] io_out_d_out_bits_denormShiftDist = _io_out_d_out_bits_denormShiftDist_T_1[4:0]; // @[fNFromRecFN.scala:52:35] wire [23:0] _io_out_d_out_bits_denormFract_T = io_out_d_out_bits_rawIn_sig[24:1]; // @[rawFloatFromRecFN.scala:55:23] wire [23:0] _io_out_d_out_bits_denormFract_T_1 = _io_out_d_out_bits_denormFract_T >> io_out_d_out_bits_denormShiftDist; // @[fNFromRecFN.scala:52:35, :53:{38,42}] wire [22:0] io_out_d_out_bits_denormFract = _io_out_d_out_bits_denormFract_T_1[22:0]; // @[fNFromRecFN.scala:53:{42,60}] wire [7:0] _io_out_d_out_bits_expOut_T = 
io_out_d_out_bits_rawIn_sExp[7:0]; // @[rawFloatFromRecFN.scala:55:23] wire [8:0] _io_out_d_out_bits_expOut_T_1 = {1'h0, _io_out_d_out_bits_expOut_T} - 9'h81; // @[fNFromRecFN.scala:58:{27,45}] wire [7:0] _io_out_d_out_bits_expOut_T_2 = _io_out_d_out_bits_expOut_T_1[7:0]; // @[fNFromRecFN.scala:58:45] wire [7:0] _io_out_d_out_bits_expOut_T_3 = io_out_d_out_bits_isSubnormal ? 8'h0 : _io_out_d_out_bits_expOut_T_2; // @[fNFromRecFN.scala:51:38, :56:16, :58:45] wire _io_out_d_out_bits_expOut_T_4 = io_out_d_out_bits_rawIn_isNaN | io_out_d_out_bits_rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire [7:0] _io_out_d_out_bits_expOut_T_5 = {8{_io_out_d_out_bits_expOut_T_4}}; // @[fNFromRecFN.scala:60:{21,44}] wire [7:0] io_out_d_out_bits_expOut = _io_out_d_out_bits_expOut_T_3 | _io_out_d_out_bits_expOut_T_5; // @[fNFromRecFN.scala:56:16, :60:{15,21}] wire [22:0] _io_out_d_out_bits_fractOut_T = io_out_d_out_bits_rawIn_sig[22:0]; // @[rawFloatFromRecFN.scala:55:23] wire [22:0] _io_out_d_out_bits_fractOut_T_1 = io_out_d_out_bits_rawIn_isInf ? 23'h0 : _io_out_d_out_bits_fractOut_T; // @[rawFloatFromRecFN.scala:55:23] wire [22:0] io_out_d_out_bits_fractOut = io_out_d_out_bits_isSubnormal ? io_out_d_out_bits_denormFract : _io_out_d_out_bits_fractOut_T_1; // @[fNFromRecFN.scala:51:38, :53:60, :62:16, :64:20] wire [8:0] io_out_d_out_bits_hi = {io_out_d_out_bits_rawIn_sign, io_out_d_out_bits_expOut}; // @[rawFloatFromRecFN.scala:55:23] assign _io_out_d_out_bits_T = {io_out_d_out_bits_hi, io_out_d_out_bits_fractOut}; // @[fNFromRecFN.scala:62:16, :66:12] assign io_out_d_out_bits = _io_out_d_out_bits_T; // @[fNFromRecFN.scala:66:12] RecFNToRecFN_210 io_out_d_m1_resizer ( // @[Arithmetic.scala:362:32] .io_in (io_out_d_m1_rec), // @[recFNFromFN.scala:50:41] .io_out (_io_out_d_m1_resizer_io_out) ); // @[Arithmetic.scala:362:32] RecFNToRecFN_211 io_out_d_m2_resizer ( // @[Arithmetic.scala:369:32] .io_in (io_out_d_m2_rec), // @[recFNFromFN.scala:50:41] .io_out (_io_out_d_m2_resizer_io_out) ); // @[Arithmetic.scala:369:32] MulAddRecFN_e8_s24_75 io_out_d_muladder ( // @[Arithmetic.scala:376:30] .io_a (_io_out_d_m1_resizer_io_out), // @[Arithmetic.scala:362:32] .io_b (_io_out_d_m2_resizer_io_out), // @[Arithmetic.scala:369:32] .io_c (io_out_d_self_rec), // @[recFNFromFN.scala:50:41] .io_out (_io_out_d_muladder_io_out) ); // @[Arithmetic.scala:376:30] assign io_out_d_bits = io_out_d_bits_0; // @[PE.scala:14:7] endmodule
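Note: the recFNFromFN / fNFromRecFN plumbing above implements the Float mac defined in Arithmetic.scala (reproduced in the next entry below): both multiplicands are resized, fed together with the accumulator into a fused MulAddRecFN, and the result is converted back to standard IEEE-754 format, so the output appears to be in_c + in_a * in_b with a single rounding. A plain-Scala reference sketch under the assumption of FP32 operands and round-to-nearest-even; fp32MacRef is an illustrative name only, and Math.fma(Float, Float, Float) requires JDK 9+:

// Hypothetical reference model for the datapath above, ignoring the
// recoded-format conversions shown in the RTL.
def fp32MacRef(m1: Float, m2: Float, self: Float): Float =
  Math.fma(m1, m2, self) // fused multiply-add, one rounding at the end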
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
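The SInt rounding shift (>>) defined above adds a rounding increment r = point_five & (zeros | ones_digit) on top of an arithmetic right shift. A worked plain-Scala sketch of that behaviour; roundingShiftRef is an illustrative name, not part of the source:

// Mirrors the point_five / zeros / ones_digit terms of the SInt >> above.
def roundingShiftRef(x: Int, u: Int): Int =
  if (u == 0) x
  else {
    val pointFive = (x >> (u - 1)) & 1              // first dropped bit
    val zeros     = (x & ((1 << (u - 1)) - 1)) != 0 // any lower dropped bits set
    val onesDigit = (x >> u) & 1                    // LSB of the shifted result
    val r         = pointFive == 1 && (zeros || onesDigit == 1)
    (x >> u) + (if (r) 1 else 0)
  }
// e.g. roundingShiftRef(6, 2) == 2 (1.5 rounds up), roundingShiftRef(5, 2) == 1 (1.25 rounds down)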
module MacUnit_12( // @[PE.scala:14:7]
  input clock, // @[PE.scala:14:7]
  input reset, // @[PE.scala:14:7]
  input [7:0] io_in_a, // @[PE.scala:16:14]
  input [7:0] io_in_b, // @[PE.scala:16:14]
  input [19:0] io_in_c, // @[PE.scala:16:14]
  output [19:0] io_out_d // @[PE.scala:16:14]
);

  wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7]
  wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7]
  wire [19:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7]
  wire [19:0] _io_out_d_T_3; // @[Arithmetic.scala:93:54]
  wire [19:0] io_out_d_0; // @[PE.scala:14:7]
  wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7]
  wire [20:0] _io_out_d_T_1 = {{5{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[19], io_in_c_0}; // @[PE.scala:14:7]
  wire [19:0] _io_out_d_T_2 = _io_out_d_T_1[19:0]; // @[Arithmetic.scala:93:54]
  assign _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54]
  assign io_out_d_0 = _io_out_d_T_3; // @[PE.scala:14:7]
  assign io_out_d = io_out_d_0; // @[PE.scala:14:7]
endmodule
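MacUnit_12 above is the SInt mac (m1 * m2 + self) followed by withWidthOf: both 8-bit operands are sign-extended, multiplied, added to the 20-bit accumulator, and the 21-bit sum is truncated back to 20 bits. A hypothetical plain-Scala reference model of that wrap-around behaviour; macUnit12Ref is an illustrative name only:

// 8-bit signed a, b and 20-bit signed c; the result wraps to 20 bits,
// matching _io_out_d_T_1[19:0] in the Verilog above.
def macUnit12Ref(a: Int, b: Int, c: Int): Int = {
  val low20 = (a * b + c) & 0xFFFFF                       // keep bits [19:0]
  if ((low20 & 0x80000) != 0) low20 - 0x100000 else low20 // re-interpret as signed
}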
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... 
// Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File LatencyInjectionQueue.scala: package compressacc import chisel3._ import chisel3.util._ import chisel3.util._ import freechips.rocketchip.util.DecoupledHelper class LatencyInjectionQueue[T <: Data](data: T, depth: Int) extends Module { val io = IO(new Bundle { val latency_cycles = Input(UInt(64.W)) val enq = Flipped(Decoupled(data)) val deq = Decoupled(data) }) val cur_cycle = RegInit(0.U(64.W)) cur_cycle := cur_cycle + 1.U val queue = Module(new Queue(data, depth)) val release_ready_cycle_q = Module(new Queue(UInt(64.W), depth)) release_ready_cycle_q.io.enq.bits := cur_cycle + io.latency_cycles queue.io.enq.bits := io.enq.bits io.deq.bits := queue.io.deq.bits val enq_fire = DecoupledHelper( queue.io.enq.ready, release_ready_cycle_q.io.enq.ready, io.enq.valid ) queue.io.enq.valid := enq_fire.fire(queue.io.enq.ready) release_ready_cycle_q.io.enq.valid := enq_fire.fire(release_ready_cycle_q.io.enq.ready) io.enq.ready := enq_fire.fire(io.enq.valid) val deq_fire = DecoupledHelper( queue.io.deq.valid, release_ready_cycle_q.io.deq.valid, release_ready_cycle_q.io.deq.bits <= cur_cycle, io.deq.ready ) queue.io.deq.ready := deq_fire.fire(queue.io.deq.valid) release_ready_cycle_q.io.deq.ready := deq_fire.fire(release_ready_cycle_q.io.deq.valid) io.deq.valid := deq_fire.fire(io.deq.ready) }
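Note: LatencyInjectionQueue pairs a data queue with a second queue of release timestamps. An entry enqueued at cycle t only becomes visible on the dequeue side once cur_cycle has reached t + latency_cycles, and the DecoupledHelper keeps the two internal queues enqueuing and dequeuing in lockstep. A hedged usage sketch follows; the wrapper module and its port names are illustrative only.

import chisel3._
import chisel3.util._

// Illustrative wrapper (assumed name) that inserts a fixed 100-cycle delay
// between two Decoupled interfaces using the queue defined above.
class LatencyModelWrapper extends Module {
  val io = IO(new Bundle {
    val in  = Flipped(Decoupled(UInt(8.W)))
    val out = Decoupled(UInt(8.W))
  })
  val q = Module(new LatencyInjectionQueue(UInt(8.W), 16))
  q.io.latency_cycles := 100.U // entries are held back for at least 100 cycles
  q.io.enq <> io.in
  io.out <> q.io.deq
}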
module LatencyInjectionQueue_29( // @[LatencyInjectionQueue.scala:9:7] input clock, // @[LatencyInjectionQueue.scala:9:7] input reset, // @[LatencyInjectionQueue.scala:9:7] input [63:0] io_latency_cycles, // @[LatencyInjectionQueue.scala:10:14] output io_enq_ready, // @[LatencyInjectionQueue.scala:10:14] input io_enq_valid, // @[LatencyInjectionQueue.scala:10:14] input [2:0] io_enq_bits_opcode, // @[LatencyInjectionQueue.scala:10:14] input [1:0] io_enq_bits_param, // @[LatencyInjectionQueue.scala:10:14] input [3:0] io_enq_bits_size, // @[LatencyInjectionQueue.scala:10:14] input [4:0] io_enq_bits_source, // @[LatencyInjectionQueue.scala:10:14] input [2:0] io_enq_bits_sink, // @[LatencyInjectionQueue.scala:10:14] input io_enq_bits_denied, // @[LatencyInjectionQueue.scala:10:14] input [255:0] io_enq_bits_data, // @[LatencyInjectionQueue.scala:10:14] input io_enq_bits_corrupt, // @[LatencyInjectionQueue.scala:10:14] input io_deq_ready, // @[LatencyInjectionQueue.scala:10:14] output io_deq_valid, // @[LatencyInjectionQueue.scala:10:14] output [4:0] io_deq_bits_source, // @[LatencyInjectionQueue.scala:10:14] output [255:0] io_deq_bits_data // @[LatencyInjectionQueue.scala:10:14] ); wire _release_ready_cycle_q_io_enq_ready; // @[LatencyInjectionQueue.scala:19:37] wire _release_ready_cycle_q_io_deq_valid; // @[LatencyInjectionQueue.scala:19:37] wire [63:0] _release_ready_cycle_q_io_deq_bits; // @[LatencyInjectionQueue.scala:19:37] wire _queue_io_enq_ready; // @[LatencyInjectionQueue.scala:18:21] wire _queue_io_deq_valid; // @[LatencyInjectionQueue.scala:18:21] wire [63:0] io_latency_cycles_0 = io_latency_cycles; // @[LatencyInjectionQueue.scala:9:7] wire io_enq_valid_0 = io_enq_valid; // @[LatencyInjectionQueue.scala:9:7] wire [2:0] io_enq_bits_opcode_0 = io_enq_bits_opcode; // @[LatencyInjectionQueue.scala:9:7] wire [1:0] io_enq_bits_param_0 = io_enq_bits_param; // @[LatencyInjectionQueue.scala:9:7] wire [3:0] io_enq_bits_size_0 = io_enq_bits_size; // @[LatencyInjectionQueue.scala:9:7] wire [4:0] io_enq_bits_source_0 = io_enq_bits_source; // @[LatencyInjectionQueue.scala:9:7] wire [2:0] io_enq_bits_sink_0 = io_enq_bits_sink; // @[LatencyInjectionQueue.scala:9:7] wire io_enq_bits_denied_0 = io_enq_bits_denied; // @[LatencyInjectionQueue.scala:9:7] wire [255:0] io_enq_bits_data_0 = io_enq_bits_data; // @[LatencyInjectionQueue.scala:9:7] wire io_enq_bits_corrupt_0 = io_enq_bits_corrupt; // @[LatencyInjectionQueue.scala:9:7] wire io_deq_ready_0 = io_deq_ready; // @[LatencyInjectionQueue.scala:9:7] wire _io_enq_ready_T; // @[Misc.scala:26:53] wire _io_deq_valid_T_1; // @[Misc.scala:26:53] wire io_enq_ready_0; // @[LatencyInjectionQueue.scala:9:7] wire [2:0] io_deq_bits_opcode; // @[LatencyInjectionQueue.scala:9:7] wire [1:0] io_deq_bits_param; // @[LatencyInjectionQueue.scala:9:7] wire [3:0] io_deq_bits_size; // @[LatencyInjectionQueue.scala:9:7] wire [4:0] io_deq_bits_source_0; // @[LatencyInjectionQueue.scala:9:7] wire [2:0] io_deq_bits_sink; // @[LatencyInjectionQueue.scala:9:7] wire io_deq_bits_denied; // @[LatencyInjectionQueue.scala:9:7] wire [255:0] io_deq_bits_data_0; // @[LatencyInjectionQueue.scala:9:7] wire io_deq_bits_corrupt; // @[LatencyInjectionQueue.scala:9:7] wire io_deq_valid_0; // @[LatencyInjectionQueue.scala:9:7] reg [63:0] cur_cycle; // @[LatencyInjectionQueue.scala:16:26] wire [64:0] _GEN = {1'h0, cur_cycle}; // @[LatencyInjectionQueue.scala:16:26, :17:26] wire [64:0] _cur_cycle_T = _GEN + 65'h1; // @[LatencyInjectionQueue.scala:17:26] wire [63:0] _cur_cycle_T_1 = 
_cur_cycle_T[63:0]; // @[LatencyInjectionQueue.scala:17:26]
  wire [64:0] _release_ready_cycle_q_io_enq_bits_T = _GEN + {1'h0, io_latency_cycles_0}; // @[LatencyInjectionQueue.scala:9:7, :17:26, :21:50]
  wire [63:0] _release_ready_cycle_q_io_enq_bits_T_1 = _release_ready_cycle_q_io_enq_bits_T[63:0]; // @[LatencyInjectionQueue.scala:21:50]
  wire _queue_io_enq_valid_T = _release_ready_cycle_q_io_enq_ready & io_enq_valid_0; // @[Misc.scala:26:53]
  wire _release_ready_cycle_q_io_enq_valid_T = _queue_io_enq_ready & io_enq_valid_0; // @[Misc.scala:26:53]
  assign _io_enq_ready_T = _queue_io_enq_ready & _release_ready_cycle_q_io_enq_ready; // @[Misc.scala:26:53]
  assign io_enq_ready_0 = _io_enq_ready_T; // @[Misc.scala:26:53]
  wire _T = _release_ready_cycle_q_io_deq_bits <= cur_cycle; // @[LatencyInjectionQueue.scala:16:26, :19:37, :38:39]
  wire _queue_io_deq_ready_T = _release_ready_cycle_q_io_deq_valid & _T; // @[Misc.scala:26:53]
  wire _queue_io_deq_ready_T_1 = _queue_io_deq_ready_T & io_deq_ready_0; // @[Misc.scala:26:53]
  wire _release_ready_cycle_q_io_deq_ready_T = _queue_io_deq_valid & _T; // @[Misc.scala:26:53]
  wire _release_ready_cycle_q_io_deq_ready_T_1 = _release_ready_cycle_q_io_deq_ready_T & io_deq_ready_0; // @[Misc.scala:26:53]
  wire _io_deq_valid_T = _queue_io_deq_valid & _release_ready_cycle_q_io_deq_valid; // @[Misc.scala:26:53]
  assign _io_deq_valid_T_1 = _io_deq_valid_T & _T; // @[Misc.scala:26:53]
  assign io_deq_valid_0 = _io_deq_valid_T_1; // @[Misc.scala:26:53]
  always @(posedge clock) begin // @[LatencyInjectionQueue.scala:9:7]
    if (reset) // @[LatencyInjectionQueue.scala:9:7]
      cur_cycle <= 64'h0; // @[LatencyInjectionQueue.scala:16:26]
    else // @[LatencyInjectionQueue.scala:9:7]
      cur_cycle <= _cur_cycle_T_1; // @[LatencyInjectionQueue.scala:16:26, :17:26]
  end // @[LatencyInjectionQueue.scala:9:7]
  Queue64_TLBundleD_a32d256s5k3z4u_10 queue ( // @[LatencyInjectionQueue.scala:18:21]
    .clock (clock),
    .reset (reset),
    .io_enq_ready (_queue_io_enq_ready),
    .io_enq_valid (_queue_io_enq_valid_T), // @[Misc.scala:26:53]
    .io_enq_bits_opcode (io_enq_bits_opcode_0), // @[LatencyInjectionQueue.scala:9:7]
    .io_enq_bits_param (io_enq_bits_param_0), // @[LatencyInjectionQueue.scala:9:7]
    .io_enq_bits_size (io_enq_bits_size_0), // @[LatencyInjectionQueue.scala:9:7]
    .io_enq_bits_source (io_enq_bits_source_0), // @[LatencyInjectionQueue.scala:9:7]
    .io_enq_bits_sink (io_enq_bits_sink_0), // @[LatencyInjectionQueue.scala:9:7]
    .io_enq_bits_denied (io_enq_bits_denied_0), // @[LatencyInjectionQueue.scala:9:7]
    .io_enq_bits_data (io_enq_bits_data_0), // @[LatencyInjectionQueue.scala:9:7]
    .io_enq_bits_corrupt (io_enq_bits_corrupt_0), // @[LatencyInjectionQueue.scala:9:7]
    .io_deq_ready (_queue_io_deq_ready_T_1), // @[Misc.scala:26:53]
    .io_deq_valid (_queue_io_deq_valid),
    .io_deq_bits_opcode (io_deq_bits_opcode),
    .io_deq_bits_param (io_deq_bits_param),
    .io_deq_bits_size (io_deq_bits_size),
    .io_deq_bits_source (io_deq_bits_source_0),
    .io_deq_bits_sink (io_deq_bits_sink),
    .io_deq_bits_denied (io_deq_bits_denied),
    .io_deq_bits_data (io_deq_bits_data_0),
    .io_deq_bits_corrupt (io_deq_bits_corrupt)
  ); // @[LatencyInjectionQueue.scala:18:21]
  Queue64_UInt64_21 release_ready_cycle_q ( // @[LatencyInjectionQueue.scala:19:37]
    .clock (clock),
    .reset (reset),
    .io_enq_ready (_release_ready_cycle_q_io_enq_ready),
    .io_enq_valid (_release_ready_cycle_q_io_enq_valid_T), // @[Misc.scala:26:53]
    .io_enq_bits (_release_ready_cycle_q_io_enq_bits_T_1), // @[LatencyInjectionQueue.scala:21:50]
    .io_deq_ready (_release_ready_cycle_q_io_deq_ready_T_1), // @[Misc.scala:26:53]
.io_deq_valid (_release_ready_cycle_q_io_deq_valid), .io_deq_bits (_release_ready_cycle_q_io_deq_bits) ); // @[LatencyInjectionQueue.scala:19:37] assign io_enq_ready = io_enq_ready_0; // @[LatencyInjectionQueue.scala:9:7] assign io_deq_valid = io_deq_valid_0; // @[LatencyInjectionQueue.scala:9:7] assign io_deq_bits_source = io_deq_bits_source_0; // @[LatencyInjectionQueue.scala:9:7] assign io_deq_bits_data = io_deq_bits_data_0; // @[LatencyInjectionQueue.scala:9:7] endmodule
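Note: in the generated module, each _*_T handshake term is one DecoupledHelper.fire(exclude) product: a signal's own ready or valid is left out of its own enable, so no handshake wire depends combinationally on itself. A minimal sketch of that idiom, with placeholder port and module names, is shown below.

import chisel3._
import chisel3.util._
import freechips.rocketchip.util.DecoupledHelper

// Minimal example (assumed module name) of the fire(exclude) pattern used above.
class HelperExample extends Module {
  val io = IO(new Bundle {
    val a = Flipped(Decoupled(UInt(8.W)))
    val b = Decoupled(UInt(8.W))
  })
  val xfer = DecoupledHelper(io.a.valid, io.b.ready)
  io.a.ready := xfer.fire(io.a.valid) // AND of every term except a.valid
  io.b.valid := xfer.fire(io.b.ready) // AND of every term except b.ready
  io.b.bits  := io.a.bits
}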
Generate the Verilog code corresponding to the following Chisel files. File Transposer.scala: package gemmini import chisel3._ import chisel3.util._ import Util._ trait Transposer[T <: Data] extends Module { def dim: Int def dataType: T val io = IO(new Bundle { val inRow = Flipped(Decoupled(Vec(dim, dataType))) val outCol = Decoupled(Vec(dim, dataType)) }) } class PipelinedTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose val sMoveUp :: sMoveLeft :: Nil = Enum(2) val state = RegInit(sMoveUp) val leftCounter = RegInit(0.U(log2Ceil(dim+1).W)) //(io.inRow.fire && state === sMoveLeft, dim+1) val upCounter = RegInit(0.U(log2Ceil(dim+1).W)) //Counter(io.inRow.fire && state === sMoveUp, dim+1) io.outCol.valid := 0.U io.inRow.ready := 0.U switch(state) { is(sMoveUp) { io.inRow.ready := upCounter <= dim.U io.outCol.valid := leftCounter > 0.U when(io.inRow.fire) { upCounter := upCounter + 1.U } when(upCounter === (dim-1).U) { state := sMoveLeft leftCounter := 0.U } when(io.outCol.fire) { leftCounter := leftCounter - 1.U } } is(sMoveLeft) { io.inRow.ready := leftCounter <= dim.U // TODO: this is naive io.outCol.valid := upCounter > 0.U when(leftCounter === (dim-1).U) { state := sMoveUp } when(io.inRow.fire) { leftCounter := leftCounter + 1.U upCounter := 0.U } when(io.outCol.fire) { upCounter := upCounter - 1.U } } } // Propagate input from bottom row to top row systolically in the move up phase // TODO: need to iterate over columns to connect Chisel values of type T // Should be able to operate directly on the Vec, but Seq and Vec don't mix (try Array?) for (colIdx <- 0 until dim) { regArray.foldRight(io.inRow.bits(colIdx)) { case (regRow, prevReg) => when (state === sMoveUp) { regRow(colIdx) := prevReg } regRow(colIdx) } } // Propagate input from right side to left side systolically in the move left phase for (rowIdx <- 0 until dim) { regArrayT.foldRight(io.inRow.bits(rowIdx)) { case (regCol, prevReg) => when (state === sMoveLeft) { regCol(rowIdx) := prevReg } regCol(rowIdx) } } // Pull from the left side or the top side based on the state for (idx <- 0 until dim) { when (state === sMoveUp) { io.outCol.bits(idx) := regArray(0)(idx) }.elsewhen(state === sMoveLeft) { io.outCol.bits(idx) := regArrayT(0)(idx) }.otherwise { io.outCol.bits(idx) := DontCare } } } class AlwaysOutTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { require(isPow2(dim)) val LEFT_DIR = 0.U(1.W) val UP_DIR = 1.U(1.W) class PE extends Module { val io = IO(new Bundle { val inR = Input(dataType) val inD = Input(dataType) val outL = Output(dataType) val outU = Output(dataType) val dir = Input(UInt(1.W)) val en = Input(Bool()) }) val reg = RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) io.outU := reg io.outL := reg } val pes = Seq.fill(dim,dim)(Module(new PE)) val counter = RegInit(0.U((log2Ceil(dim) max 1).W)) // TODO replace this with a standard Chisel counter val dir = RegInit(LEFT_DIR) // Wire up horizontal signals for (row <- 0 until dim; col <- 0 until dim) { val right_in = if (col == dim-1) io.inRow.bits(row) else pes(row)(col+1).io.outL pes(row)(col).io.inR := right_in } // Wire up vertical signals for (row <- 0 until dim; col <- 0 until dim) { val down_in = if (row == dim-1) io.inRow.bits(col) else pes(row+1)(col).io.outU pes(row)(col).io.inD := down_in } // Wire up global signals pes.flatten.foreach(_.io.dir := dir) pes.flatten.foreach(_.io.en := 
io.inRow.fire) io.outCol.valid := true.B io.inRow.ready := true.B val left_out = VecInit(pes.transpose.head.map(_.io.outL)) val up_out = VecInit(pes.head.map(_.io.outU)) io.outCol.bits := Mux(dir === LEFT_DIR, left_out, up_out) when (io.inRow.fire) { counter := wrappingAdd(counter, 1.U, dim) } when (counter === (dim-1).U && io.inRow.fire) { dir := ~dir } } class NaiveTransposer[T <: Data](val dim: Int, val dataType: T) extends Transposer[T] { val regArray = Seq.fill(dim, dim)(Reg(dataType)) val regArrayT = regArray.transpose // state = 0 => filling regArray row-wise, state = 1 => draining regArray column-wise val state = RegInit(0.U(1.W)) val countInc = io.inRow.fire || io.outCol.fire val (countValue, countWrap) = Counter(countInc, dim) io.inRow.ready := state === 0.U io.outCol.valid := state === 1.U for (i <- 0 until dim) { for (j <- 0 until dim) { when(countValue === i.U && io.inRow.fire) { regArray(i)(j) := io.inRow.bits(j) } } } for (i <- 0 until dim) { io.outCol.bits(i) := 0.U for (j <- 0 until dim) { when(countValue === j.U) { io.outCol.bits(i) := regArrayT(j)(i) } } } when (io.inRow.fire && countWrap) { state := 1.U } when (io.outCol.fire && countWrap) { state := 0.U } assert(!(state === 0.U) || !io.outCol.fire) assert(!(state === 1.U) || !io.inRow.fire) }
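Note: Transposer.scala defines three implementations behind the same Decoupled row-in / column-out interface; AlwaysOutTransposer streams data through a dim x dim grid of single-register PEs and flips the shift direction every dim accepted rows. The sketch below just instantiates it for an 8x8 array of 8-bit values; the wrapper is illustrative, not part of the gemmini sources.

import chisel3._
import chisel3.util._

// Illustrative instantiation (assumed wrapper name) of an 8x8 transposer.
class TransposeWrapper extends Module {
  val io = IO(new Bundle {
    val inRow  = Flipped(Decoupled(Vec(8, UInt(8.W))))
    val outCol = Decoupled(Vec(8, UInt(8.W)))
  })
  val t = Module(new AlwaysOutTransposer(8, UInt(8.W)))
  t.io.inRow <> io.inRow
  io.outCol <> t.io.outCol
}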
module PE_166( // @[Transposer.scala:100:9]
  input clock, // @[Transposer.scala:100:9]
  input reset, // @[Transposer.scala:100:9]
  input [7:0] io_inR, // @[Transposer.scala:101:16]
  input [7:0] io_inD, // @[Transposer.scala:101:16]
  output [7:0] io_outL, // @[Transposer.scala:101:16]
  output [7:0] io_outU, // @[Transposer.scala:101:16]
  input io_dir, // @[Transposer.scala:101:16]
  input io_en // @[Transposer.scala:101:16]
);
  wire [7:0] io_inR_0 = io_inR; // @[Transposer.scala:100:9]
  wire [7:0] io_inD_0 = io_inD; // @[Transposer.scala:100:9]
  wire io_dir_0 = io_dir; // @[Transposer.scala:100:9]
  wire io_en_0 = io_en; // @[Transposer.scala:100:9]
  wire [7:0] io_outL_0; // @[Transposer.scala:100:9]
  wire [7:0] io_outU_0; // @[Transposer.scala:100:9]
  wire _reg_T = ~io_dir_0; // @[Transposer.scala:100:9, :110:36]
  wire [7:0] _reg_T_1 = _reg_T ? io_inR_0 : io_inD_0; // @[Transposer.scala:100:9, :110:{28,36}]
  reg [7:0] reg_0; // @[Transposer.scala:110:24]
  assign io_outL_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
  assign io_outU_0 = reg_0; // @[Transposer.scala:100:9, :110:24]
  always @(posedge clock) begin // @[Transposer.scala:100:9]
    if (io_en_0) // @[Transposer.scala:100:9]
      reg_0 <= _reg_T_1; // @[Transposer.scala:110:{24,28}]
  end // @[Transposer.scala:100:9]
  assign io_outL = io_outL_0; // @[Transposer.scala:100:9]
  assign io_outU = io_outU_0; // @[Transposer.scala:100:9]
endmodule
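Note: PE_166 is one cell of the AlwaysOutTransposer grid. RegEnable(Mux(io.dir === LEFT_DIR, io.inR, io.inD), io.en) lowers to the direction mux _reg_T_1 and the io_en-gated register update above, and the single register drives both outputs. A standalone sketch of that lowering, with assumed names:

import chisel3._
import chisel3.util._

// Standalone sketch (assumed name) of the mux-plus-enabled-register cell above.
class TransposerCellSketch extends Module {
  val io = IO(new Bundle {
    val inR = Input(UInt(8.W))
    val inD = Input(UInt(8.W))
    val dir = Input(UInt(1.W)) // 0 = shift left, 1 = shift up
    val en  = Input(Bool())
    val out = Output(UInt(8.W))
  })
  io.out := RegEnable(Mux(io.dir === 0.U, io.inR, io.inD), io.en)
}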
Generate the Verilog code corresponding to the following Chisel files. File RecFNToRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import consts._ class RecFNToRecFN( inExpWidth: Int, inSigWidth: Int, outExpWidth: Int, outSigWidth: Int) extends chisel3.RawModule { val io = IO(new Bundle { val in = Input(Bits((inExpWidth + inSigWidth + 1).W)) val roundingMode = Input(UInt(3.W)) val detectTininess = Input(UInt(1.W)) val out = Output(Bits((outExpWidth + outSigWidth + 1).W)) val exceptionFlags = Output(Bits(5.W)) }) //------------------------------------------------------------------------ //------------------------------------------------------------------------ val rawIn = rawFloatFromRecFN(inExpWidth, inSigWidth, io.in); if ((inExpWidth == outExpWidth) && (inSigWidth <= outSigWidth)) { //-------------------------------------------------------------------- //-------------------------------------------------------------------- io.out := io.in<<(outSigWidth - inSigWidth) io.exceptionFlags := isSigNaNRawFloat(rawIn) ## 0.U(4.W) } else { //-------------------------------------------------------------------- //-------------------------------------------------------------------- val roundAnyRawFNToRecFN = Module( new RoundAnyRawFNToRecFN( inExpWidth, inSigWidth, outExpWidth, outSigWidth, flRoundOpt_sigMSBitAlwaysZero )) roundAnyRawFNToRecFN.io.invalidExc := isSigNaNRawFloat(rawIn) roundAnyRawFNToRecFN.io.infiniteExc := false.B roundAnyRawFNToRecFN.io.in := rawIn roundAnyRawFNToRecFN.io.roundingMode := io.roundingMode roundAnyRawFNToRecFN.io.detectTininess := io.detectTininess io.out := roundAnyRawFNToRecFN.io.out io.exceptionFlags := roundAnyRawFNToRecFN.io.exceptionFlags } } File rawFloatFromRecFN.scala: /*============================================================================ This Chisel source file is part of a pre-release version of the HardFloat IEEE Floating-Point Arithmetic Package, by John R. Hauser (with some contributions from Yunsup Lee and Andrew Waterman, mainly concerning testing). Copyright 2010, 2011, 2012, 2013, 2014, 2015, 2016 The Regents of the University of California. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions, and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions, and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS "AS IS", AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
=============================================================================*/ package hardfloat import chisel3._ import chisel3.util._ /*---------------------------------------------------------------------------- | In the result, no more than one of 'isNaN', 'isInf', and 'isZero' will be | set. *----------------------------------------------------------------------------*/ object rawFloatFromRecFN { def apply(expWidth: Int, sigWidth: Int, in: Bits): RawFloat = { val exp = in(expWidth + sigWidth - 1, sigWidth - 1) val isZero = exp(expWidth, expWidth - 2) === 0.U val isSpecial = exp(expWidth, expWidth - 1) === 3.U val out = Wire(new RawFloat(expWidth, sigWidth)) out.isNaN := isSpecial && exp(expWidth - 2) out.isInf := isSpecial && ! exp(expWidth - 2) out.isZero := isZero out.sign := in(expWidth + sigWidth) out.sExp := exp.zext out.sig := 0.U(1.W) ## ! isZero ## in(sigWidth - 2, 0) out } }
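Note: rawFloatFromRecFN unpacks hardfloat's recoded format (expWidth + sigWidth + 1 bits): the top three exponent bits distinguish zero from the NaN/infinity specials, and the significand is rebuilt with an explicit leading bit. Below is a hedged usage sketch for the 33-bit single-precision case used in the next module (expWidth = 8, sigWidth = 24); the wrapper module name is assumed.

import chisel3._
import hardfloat.rawFloatFromRecFN

// Illustrative decoder (assumed name): expose the classification bits of a
// recoded single-precision input.
class DecodeRecFN extends Module {
  val io = IO(new Bundle {
    val in     = Input(Bits(33.W))
    val isNaN  = Output(Bool())
    val isInf  = Output(Bool())
    val isZero = Output(Bool())
  })
  val raw = rawFloatFromRecFN(8, 24, io.in)
  io.isNaN  := raw.isNaN
  io.isInf  := raw.isInf
  io.isZero := raw.isZero
}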
module RecFNToRecFN_160( // @[RecFNToRecFN.scala:44:5] input [32:0] io_in, // @[RecFNToRecFN.scala:48:16] output [32:0] io_out // @[RecFNToRecFN.scala:48:16] ); wire [32:0] io_in_0 = io_in; // @[RecFNToRecFN.scala:44:5] wire io_detectTininess = 1'h1; // @[RecFNToRecFN.scala:44:5, :48:16] wire [2:0] io_roundingMode = 3'h0; // @[RecFNToRecFN.scala:44:5, :48:16] wire [32:0] _io_out_T = io_in_0; // @[RecFNToRecFN.scala:44:5, :64:35] wire [4:0] _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:65:54] wire [32:0] io_out_0; // @[RecFNToRecFN.scala:44:5] wire [4:0] io_exceptionFlags; // @[RecFNToRecFN.scala:44:5] wire [8:0] rawIn_exp = io_in_0[31:23]; // @[rawFloatFromRecFN.scala:51:21] wire [2:0] _rawIn_isZero_T = rawIn_exp[8:6]; // @[rawFloatFromRecFN.scala:51:21, :52:28] wire rawIn_isZero = _rawIn_isZero_T == 3'h0; // @[rawFloatFromRecFN.scala:52:{28,53}] wire rawIn_isZero_0 = rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :55:23] wire [1:0] _rawIn_isSpecial_T = rawIn_exp[8:7]; // @[rawFloatFromRecFN.scala:51:21, :53:28] wire rawIn_isSpecial = &_rawIn_isSpecial_T; // @[rawFloatFromRecFN.scala:53:{28,53}] wire _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:56:33] wire _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:57:33] wire _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:59:25] wire [9:0] _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:60:27] wire [24:0] _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:61:44] wire rawIn_isNaN; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_isInf; // @[rawFloatFromRecFN.scala:55:23] wire rawIn_sign; // @[rawFloatFromRecFN.scala:55:23] wire [9:0] rawIn_sExp; // @[rawFloatFromRecFN.scala:55:23] wire [24:0] rawIn_sig; // @[rawFloatFromRecFN.scala:55:23] wire _rawIn_out_isNaN_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41] wire _rawIn_out_isInf_T = rawIn_exp[6]; // @[rawFloatFromRecFN.scala:51:21, :56:41, :57:41] assign _rawIn_out_isNaN_T_1 = rawIn_isSpecial & _rawIn_out_isNaN_T; // @[rawFloatFromRecFN.scala:53:53, :56:{33,41}] assign rawIn_isNaN = _rawIn_out_isNaN_T_1; // @[rawFloatFromRecFN.scala:55:23, :56:33] wire _rawIn_out_isInf_T_1 = ~_rawIn_out_isInf_T; // @[rawFloatFromRecFN.scala:57:{36,41}] assign _rawIn_out_isInf_T_2 = rawIn_isSpecial & _rawIn_out_isInf_T_1; // @[rawFloatFromRecFN.scala:53:53, :57:{33,36}] assign rawIn_isInf = _rawIn_out_isInf_T_2; // @[rawFloatFromRecFN.scala:55:23, :57:33] assign _rawIn_out_sign_T = io_in_0[32]; // @[rawFloatFromRecFN.scala:59:25] assign rawIn_sign = _rawIn_out_sign_T; // @[rawFloatFromRecFN.scala:55:23, :59:25] assign _rawIn_out_sExp_T = {1'h0, rawIn_exp}; // @[rawFloatFromRecFN.scala:51:21, :60:27] assign rawIn_sExp = _rawIn_out_sExp_T; // @[rawFloatFromRecFN.scala:55:23, :60:27] wire _rawIn_out_sig_T = ~rawIn_isZero; // @[rawFloatFromRecFN.scala:52:53, :61:35] wire [1:0] _rawIn_out_sig_T_1 = {1'h0, _rawIn_out_sig_T}; // @[rawFloatFromRecFN.scala:61:{32,35}] wire [22:0] _rawIn_out_sig_T_2 = io_in_0[22:0]; // @[rawFloatFromRecFN.scala:61:49] assign _rawIn_out_sig_T_3 = {_rawIn_out_sig_T_1, _rawIn_out_sig_T_2}; // @[rawFloatFromRecFN.scala:61:{32,44,49}] assign rawIn_sig = _rawIn_out_sig_T_3; // @[rawFloatFromRecFN.scala:55:23, :61:44] assign io_out_0 = _io_out_T; // @[RecFNToRecFN.scala:44:5, :64:35] wire _io_exceptionFlags_T = rawIn_sig[22]; // @[rawFloatFromRecFN.scala:55:23] wire _io_exceptionFlags_T_1 = ~_io_exceptionFlags_T; // @[common.scala:82:{49,56}] wire _io_exceptionFlags_T_2 = rawIn_isNaN & _io_exceptionFlags_T_1; // @[rawFloatFromRecFN.scala:55:23] assign _io_exceptionFlags_T_3 = 
{_io_exceptionFlags_T_2, 4'h0}; // @[common.scala:82:46] assign io_exceptionFlags = _io_exceptionFlags_T_3; // @[RecFNToRecFN.scala:44:5, :65:54] assign io_out = io_out_0; // @[RecFNToRecFN.scala:44:5] endmodule
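Note: because this instance converts between identical formats (inExpWidth == outExpWidth and inSigWidth == outSigWidth), RecFNToRecFN takes the shortcut branch: the output is the input shifted by zero, and the only flag that can be raised is the invalid bit for a signaling NaN (a NaN whose top fraction bit is clear). A standalone sketch of that branch for the 33-bit case, with assumed names:

import chisel3._

// Standalone sketch (assumed name) of the equal-width pass-through above.
class PassThroughConvertSketch extends Module {
  val io = IO(new Bundle {
    val in             = Input(Bits(33.W))
    val out            = Output(Bits(33.W))
    val exceptionFlags = Output(Bits(5.W))
  })
  val isSpecial = io.in(31, 30).andR     // exp(8,7) === 3
  val isNaN     = isSpecial && io.in(29) // exp(6) set
  val isSigNaN  = isNaN && !io.in(22)    // top fraction bit clear
  io.out := io.in
  io.exceptionFlags := isSigNaN ## 0.U(4.W) // invalid-operation flag only
}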
Generate the Verilog code corresponding to the following Chisel files. File Misc.scala: // See LICENSE.Berkeley for license details. // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ import chisel3.util.random.LFSR import org.chipsalliance.cde.config.Parameters import scala.math._ class ParameterizedBundle(implicit p: Parameters) extends Bundle trait Clocked extends Bundle { val clock = Clock() val reset = Bool() } object DecoupledHelper { def apply(rvs: Bool*) = new DecoupledHelper(rvs) } class DecoupledHelper(val rvs: Seq[Bool]) { def fire(exclude: Bool, includes: Bool*) = { require(rvs.contains(exclude), "Excluded Bool not present in DecoupledHelper! Note that DecoupledHelper uses referential equality for exclusion! If you don't want to exclude anything, use fire()!") (rvs.filter(_ ne exclude) ++ includes).reduce(_ && _) } def fire() = { rvs.reduce(_ && _) } } object MuxT { def apply[T <: Data, U <: Data](cond: Bool, con: (T, U), alt: (T, U)): (T, U) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2)) def apply[T <: Data, U <: Data, W <: Data](cond: Bool, con: (T, U, W), alt: (T, U, W)): (T, U, W) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3)) def apply[T <: Data, U <: Data, W <: Data, X <: Data](cond: Bool, con: (T, U, W, X), alt: (T, U, W, X)): (T, U, W, X) = (Mux(cond, con._1, alt._1), Mux(cond, con._2, alt._2), Mux(cond, con._3, alt._3), Mux(cond, con._4, alt._4)) } /** Creates a cascade of n MuxTs to search for a key value. */ object MuxTLookup { def apply[S <: UInt, T <: Data, U <: Data](key: S, default: (T, U), mapping: Seq[(S, (T, U))]): (T, U) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } def apply[S <: UInt, T <: Data, U <: Data, W <: Data](key: S, default: (T, U, W), mapping: Seq[(S, (T, U, W))]): (T, U, W) = { var res = default for ((k, v) <- mapping.reverse) res = MuxT(k === key, v, res) res } } object ValidMux { def apply[T <: Data](v1: ValidIO[T], v2: ValidIO[T]*): ValidIO[T] = { apply(v1 +: v2.toSeq) } def apply[T <: Data](valids: Seq[ValidIO[T]]): ValidIO[T] = { val out = Wire(Valid(valids.head.bits.cloneType)) out.valid := valids.map(_.valid).reduce(_ || _) out.bits := MuxCase(valids.head.bits, valids.map(v => (v.valid -> v.bits))) out } } object Str { def apply(s: String): UInt = { var i = BigInt(0) require(s.forall(validChar _)) for (c <- s) i = (i << 8) | c i.U((s.length*8).W) } def apply(x: Char): UInt = { require(validChar(x)) x.U(8.W) } def apply(x: UInt): UInt = apply(x, 10) def apply(x: UInt, radix: Int): UInt = { val rad = radix.U val w = x.getWidth require(w > 0) var q = x var s = digit(q % rad) for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad s = Cat(Mux((radix == 10).B && q === 0.U, Str(' '), digit(q % rad)), s) } s } def apply(x: SInt): UInt = apply(x, 10) def apply(x: SInt, radix: Int): UInt = { val neg = x < 0.S val abs = x.abs.asUInt if (radix != 10) { Cat(Mux(neg, Str('-'), Str(' ')), Str(abs, radix)) } else { val rad = radix.U val w = abs.getWidth require(w > 0) var q = abs var s = digit(q % rad) var needSign = neg for (i <- 1 until ceil(log(2)/log(radix)*w).toInt) { q = q / rad val placeSpace = q === 0.U val space = Mux(needSign, Str('-'), Str(' ')) needSign = needSign && !placeSpace s = Cat(Mux(placeSpace, space, digit(q % rad)), s) } Cat(Mux(needSign, Str('-'), Str(' ')), s) } } private def digit(d: UInt): UInt = Mux(d < 10.U, Str('0')+d, Str(('a'-10).toChar)+d)(7,0) private def 
validChar(x: Char) = x == (x & 0xFF) } object Split { def apply(x: UInt, n0: Int) = { val w = x.getWidth (x.extract(w-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } def apply(x: UInt, n2: Int, n1: Int, n0: Int) = { val w = x.getWidth (x.extract(w-1,n2), x.extract(n2-1,n1), x.extract(n1-1,n0), x.extract(n0-1,0)) } } object Random { def apply(mod: Int, random: UInt): UInt = { if (isPow2(mod)) random.extract(log2Ceil(mod)-1,0) else PriorityEncoder(partition(apply(1 << log2Up(mod*8), random), mod)) } def apply(mod: Int): UInt = apply(mod, randomizer) def oneHot(mod: Int, random: UInt): UInt = { if (isPow2(mod)) UIntToOH(random(log2Up(mod)-1,0)) else PriorityEncoderOH(partition(apply(1 << log2Up(mod*8), random), mod)).asUInt } def oneHot(mod: Int): UInt = oneHot(mod, randomizer) private def randomizer = LFSR(16) private def partition(value: UInt, slices: Int) = Seq.tabulate(slices)(i => value < (((i + 1) << value.getWidth) / slices).U) } object Majority { def apply(in: Set[Bool]): Bool = { val n = (in.size >> 1) + 1 val clauses = in.subsets(n).map(_.reduce(_ && _)) clauses.reduce(_ || _) } def apply(in: Seq[Bool]): Bool = apply(in.toSet) def apply(in: UInt): Bool = apply(in.asBools.toSet) } object PopCountAtLeast { private def two(x: UInt): (Bool, Bool) = x.getWidth match { case 1 => (x.asBool, false.B) case n => val half = x.getWidth / 2 val (leftOne, leftTwo) = two(x(half - 1, 0)) val (rightOne, rightTwo) = two(x(x.getWidth - 1, half)) (leftOne || rightOne, leftTwo || rightTwo || (leftOne && rightOne)) } def apply(x: UInt, n: Int): Bool = n match { case 0 => true.B case 1 => x.orR case 2 => two(x)._2 case 3 => PopCount(x) >= n.U } } // This gets used everywhere, so make the smallest circuit possible ... // Given an address and size, create a mask of beatBytes size // eg: (0x3, 0, 4) => 0001, (0x3, 1, 4) => 0011, (0x3, 2, 4) => 1111 // groupBy applies an interleaved OR reduction; groupBy=2 take 0010 => 01 object MaskGen { def apply(addr_lo: UInt, lgSize: UInt, beatBytes: Int, groupBy: Int = 1): UInt = { require (groupBy >= 1 && beatBytes >= groupBy) require (isPow2(beatBytes) && isPow2(groupBy)) val lgBytes = log2Ceil(beatBytes) val sizeOH = UIntToOH(lgSize | 0.U(log2Up(beatBytes).W), log2Up(beatBytes)) | (groupBy*2 - 1).U def helper(i: Int): Seq[(Bool, Bool)] = { if (i == 0) { Seq((lgSize >= lgBytes.asUInt, true.B)) } else { val sub = helper(i-1) val size = sizeOH(lgBytes - i) val bit = addr_lo(lgBytes - i) val nbit = !bit Seq.tabulate (1 << i) { j => val (sub_acc, sub_eq) = sub(j/2) val eq = sub_eq && (if (j % 2 == 1) bit else nbit) val acc = sub_acc || (size && eq) (acc, eq) } } } if (groupBy == beatBytes) 1.U else Cat(helper(lgBytes-log2Ceil(groupBy)).map(_._1).reverse) } } File Plic.scala: // See LICENSE.SiFive for license details. 
package freechips.rocketchip.devices.tilelink import chisel3._ import chisel3.experimental._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.diplomacy.{AddressSet} import freechips.rocketchip.resources.{Description, Resource, ResourceBinding, ResourceBindings, ResourceInt, SimpleDevice} import freechips.rocketchip.interrupts.{IntNexusNode, IntSinkParameters, IntSinkPortParameters, IntSourceParameters, IntSourcePortParameters} import freechips.rocketchip.regmapper.{RegField, RegFieldDesc, RegFieldRdAction, RegFieldWrType, RegReadFn, RegWriteFn} import freechips.rocketchip.subsystem.{BaseSubsystem, CBUS, TLBusWrapperLocation} import freechips.rocketchip.tilelink.{TLFragmenter, TLRegisterNode} import freechips.rocketchip.util.{Annotated, MuxT, property} import scala.math.min import freechips.rocketchip.util.UIntToAugmentedUInt import freechips.rocketchip.util.SeqToAugmentedSeq class GatewayPLICIO extends Bundle { val valid = Output(Bool()) val ready = Input(Bool()) val complete = Input(Bool()) } class LevelGateway extends Module { val io = IO(new Bundle { val interrupt = Input(Bool()) val plic = new GatewayPLICIO }) val inFlight = RegInit(false.B) when (io.interrupt && io.plic.ready) { inFlight := true.B } when (io.plic.complete) { inFlight := false.B } io.plic.valid := io.interrupt && !inFlight } object PLICConsts { def maxDevices = 1023 def maxMaxHarts = 15872 def priorityBase = 0x0 def pendingBase = 0x1000 def enableBase = 0x2000 def hartBase = 0x200000 def claimOffset = 4 def priorityBytes = 4 def enableOffset(i: Int) = i * ((maxDevices+7)/8) def hartOffset(i: Int) = i * 0x1000 def enableBase(i: Int):Int = enableOffset(i) + enableBase def hartBase(i: Int):Int = hartOffset(i) + hartBase def size(maxHarts: Int): Int = { require(maxHarts > 0 && maxHarts <= maxMaxHarts, s"Must be: maxHarts=$maxHarts > 0 && maxHarts <= PLICConsts.maxMaxHarts=${PLICConsts.maxMaxHarts}") 1 << log2Ceil(hartBase(maxHarts)) } require(hartBase >= enableBase(maxMaxHarts)) } case class PLICParams(baseAddress: BigInt = 0xC000000, maxPriorities: Int = 7, intStages: Int = 0, maxHarts: Int = PLICConsts.maxMaxHarts) { require (maxPriorities >= 0) def address = AddressSet(baseAddress, PLICConsts.size(maxHarts)-1) } case object PLICKey extends Field[Option[PLICParams]](None) case class PLICAttachParams( slaveWhere: TLBusWrapperLocation = CBUS ) case object PLICAttachKey extends Field(PLICAttachParams()) /** Platform-Level Interrupt Controller */ class TLPLIC(params: PLICParams, beatBytes: Int)(implicit p: Parameters) extends LazyModule { // plic0 => max devices 1023 val device: SimpleDevice = new SimpleDevice("interrupt-controller", Seq("riscv,plic0")) { override val alwaysExtended = true override def describe(resources: ResourceBindings): Description = { val Description(name, mapping) = super.describe(resources) val extra = Map( "interrupt-controller" -> Nil, "riscv,ndev" -> Seq(ResourceInt(nDevices)), "riscv,max-priority" -> Seq(ResourceInt(nPriorities)), "#interrupt-cells" -> Seq(ResourceInt(1))) Description(name, mapping ++ extra) } } val node : TLRegisterNode = TLRegisterNode( address = Seq(params.address), device = device, beatBytes = beatBytes, undefZero = true, concurrency = 1) // limiting concurrency handles RAW hazards on claim registers val intnode: IntNexusNode = IntNexusNode( sourceFn = { _ => IntSourcePortParameters(Seq(IntSourceParameters(1, Seq(Resource(device, "int"))))) }, sinkFn = { _ => 
IntSinkPortParameters(Seq(IntSinkParameters())) }, outputRequiresInput = false, inputRequiresOutput = false) /* Negotiated sizes */ def nDevices: Int = intnode.edges.in.map(_.source.num).sum def minPriorities = min(params.maxPriorities, nDevices) def nPriorities = (1 << log2Ceil(minPriorities+1)) - 1 // round up to next 2^n-1 def nHarts = intnode.edges.out.map(_.source.num).sum // Assign all the devices unique ranges lazy val sources = intnode.edges.in.map(_.source) lazy val flatSources = (sources zip sources.map(_.num).scanLeft(0)(_+_).init).map { case (s, o) => s.sources.map(z => z.copy(range = z.range.offset(o))) }.flatten ResourceBinding { flatSources.foreach { s => s.resources.foreach { r => // +1 because interrupt 0 is reserved (s.range.start until s.range.end).foreach { i => r.bind(device, ResourceInt(i+1)) } } } } lazy val module = new Impl class Impl extends LazyModuleImp(this) { Annotated.params(this, params) val (io_devices, edgesIn) = intnode.in.unzip val (io_harts, _) = intnode.out.unzip // Compact the interrupt vector the same way val interrupts = intnode.in.map { case (i, e) => i.take(e.source.num) }.flatten // This flattens the harts into an MSMSMSMSMS... or MMMMM.... sequence val harts = io_harts.flatten def getNInterrupts = interrupts.size println(s"Interrupt map (${nHarts} harts ${nDevices} interrupts):") flatSources.foreach { s => // +1 because 0 is reserved, +1-1 because the range is half-open println(s" [${s.range.start+1}, ${s.range.end}] => ${s.name}") } println("") require (nDevices == interrupts.size, s"Must be: nDevices=$nDevices == interrupts.size=${interrupts.size}") require (nHarts == harts.size, s"Must be: nHarts=$nHarts == harts.size=${harts.size}") require(nDevices <= PLICConsts.maxDevices, s"Must be: nDevices=$nDevices <= PLICConsts.maxDevices=${PLICConsts.maxDevices}") require(nHarts > 0 && nHarts <= params.maxHarts, s"Must be: nHarts=$nHarts > 0 && nHarts <= PLICParams.maxHarts=${params.maxHarts}") // For now, use LevelGateways for all TL2 interrupts val gateways = interrupts.map { case i => val gateway = Module(new LevelGateway) gateway.io.interrupt := i gateway.io.plic } val prioBits = log2Ceil(nPriorities+1) val priority = if (nPriorities > 0) Reg(Vec(nDevices, UInt(prioBits.W))) else WireDefault(VecInit.fill(nDevices max 1)(1.U)) val threshold = if (nPriorities > 0) Reg(Vec(nHarts, UInt(prioBits.W))) else WireDefault(VecInit.fill(nHarts)(0.U)) val pending = RegInit(VecInit.fill(nDevices max 1){false.B}) /* Construct the enable registers, chunked into 8-bit segments to reduce verilog size */ val firstEnable = nDevices min 7 val fullEnables = (nDevices - firstEnable) / 8 val tailEnable = nDevices - firstEnable - 8*fullEnables def enableRegs = (Reg(UInt(firstEnable.W)) +: Seq.fill(fullEnables) { Reg(UInt(8.W)) }) ++ (if (tailEnable > 0) Some(Reg(UInt(tailEnable.W))) else None) val enables = Seq.fill(nHarts) { enableRegs } val enableVec = VecInit(enables.map(x => Cat(x.reverse))) val enableVec0 = VecInit(enableVec.map(x => Cat(x, 0.U(1.W)))) val maxDevs = Reg(Vec(nHarts, UInt(log2Ceil(nDevices+1).W))) val pendingUInt = Cat(pending.reverse) if(nDevices > 0) { for (hart <- 0 until nHarts) { val fanin = Module(new PLICFanIn(nDevices, prioBits)) fanin.io.prio := priority fanin.io.ip := enableVec(hart) & pendingUInt maxDevs(hart) := fanin.io.dev harts(hart) := ShiftRegister(RegNext(fanin.io.max) > threshold(hart), params.intStages) } } // Priority registers are 32-bit aligned so treat each as its own group. 
// Otherwise, the off-by-one nature of the priority registers gets confusing. require(PLICConsts.priorityBytes == 4, s"PLIC Priority register descriptions assume 32-bits per priority, not ${PLICConsts.priorityBytes}") def priorityRegDesc(i: Int) = RegFieldDesc( name = s"priority_$i", desc = s"Acting priority of interrupt source $i", group = Some(s"priority_${i}"), groupDesc = Some(s"Acting priority of interrupt source ${i}"), reset = if (nPriorities > 0) None else Some(1)) def pendingRegDesc(i: Int) = RegFieldDesc( name = s"pending_$i", desc = s"Set to 1 if interrupt source $i is pending, regardless of its enable or priority setting.", group = Some("pending"), groupDesc = Some("Pending Bit Array. 1 Bit for each interrupt source."), volatile = true) def enableRegDesc(i: Int, j: Int, wide: Int) = { val low = if (j == 0) 1 else j*8 val high = low + wide - 1 RegFieldDesc( name = s"enables_${j}", desc = s"Targets ${low}-${high}. Set bits to 1 if interrupt should be enabled.", group = Some(s"enables_${i}"), groupDesc = Some(s"Enable bits for each interrupt source for target $i. 1 bit for each interrupt source.")) } def priorityRegField(x: UInt, i: Int) = if (nPriorities > 0) { RegField(prioBits, x, priorityRegDesc(i)) } else { RegField.r(prioBits, x, priorityRegDesc(i)) } val priorityRegFields = priority.zipWithIndex.map { case (p, i) => PLICConsts.priorityBase+PLICConsts.priorityBytes*(i+1) -> Seq(priorityRegField(p, i+1)) } val pendingRegFields = Seq(PLICConsts.pendingBase -> (RegField(1) +: pending.zipWithIndex.map { case (b, i) => RegField.r(1, b, pendingRegDesc(i+1))})) val enableRegFields = enables.zipWithIndex.map { case (e, i) => PLICConsts.enableBase(i) -> (RegField(1) +: e.zipWithIndex.map { case (x, j) => RegField(x.getWidth, x, enableRegDesc(i, j, x.getWidth)) }) } // When a hart reads a claim/complete register, then the // device which is currently its highest priority is no longer pending. // This code exploits the fact that, practically, only one claim/complete // register can be read at a time. We check for this because if the address map // were to change, it may no longer be true. // Note: PLIC doesn't care which hart reads the register. val claimer = Wire(Vec(nHarts, Bool())) assert((claimer.asUInt & (claimer.asUInt - 1.U)) === 0.U) // One-Hot val claiming = Seq.tabulate(nHarts){i => Mux(claimer(i), maxDevs(i), 0.U)}.reduceLeft(_|_) val claimedDevs = VecInit(UIntToOH(claiming, nDevices+1).asBools) ((pending zip gateways) zip claimedDevs.tail) foreach { case ((p, g), c) => g.ready := !p when (c || g.valid) { p := !c } } // When a hart writes a claim/complete register, then // the written device (as long as it is actually enabled for that // hart) is marked complete. // This code exploits the fact that, practically, only one claim/complete register // can be written at a time. We check for this because if the address map // were to change, it may no longer be true. // Note -- PLIC doesn't care which hart writes the register. val completer = Wire(Vec(nHarts, Bool())) assert((completer.asUInt & (completer.asUInt - 1.U)) === 0.U) // One-Hot val completerDev = Wire(UInt(log2Up(nDevices + 1).W)) val completedDevs = Mux(completer.reduce(_ || _), UIntToOH(completerDev, nDevices+1), 0.U) (gateways zip completedDevs.asBools.tail) foreach { case (g, c) => g.complete := c } def thresholdRegDesc(i: Int) = RegFieldDesc( name = s"threshold_$i", desc = s"Interrupt & claim threshold for target $i. 
Maximum value is ${nPriorities}.", reset = if (nPriorities > 0) None else Some(1)) def thresholdRegField(x: UInt, i: Int) = if (nPriorities > 0) { RegField(prioBits, x, thresholdRegDesc(i)) } else { RegField.r(prioBits, x, thresholdRegDesc(i)) } val hartRegFields = Seq.tabulate(nHarts) { i => PLICConsts.hartBase(i) -> Seq( thresholdRegField(threshold(i), i), RegField(32-prioBits), RegField(32, RegReadFn { valid => claimer(i) := valid (true.B, maxDevs(i)) }, RegWriteFn { (valid, data) => assert(completerDev === data.extract(log2Ceil(nDevices+1)-1, 0), "completerDev should be consistent for all harts") completerDev := data.extract(log2Ceil(nDevices+1)-1, 0) completer(i) := valid && enableVec0(i)(completerDev) true.B }, Some(RegFieldDesc(s"claim_complete_$i", s"Claim/Complete register for Target $i. Reading this register returns the claimed interrupt number and makes it no longer pending." + s"Writing the interrupt number back completes the interrupt.", reset = None, wrType = Some(RegFieldWrType.MODIFY), rdAction = Some(RegFieldRdAction.MODIFY), volatile = true)) ) ) } node.regmap((priorityRegFields ++ pendingRegFields ++ enableRegFields ++ hartRegFields):_*) if (nDevices >= 2) { val claimed = claimer(0) && maxDevs(0) > 0.U val completed = completer(0) property.cover(claimed && RegEnable(claimed, false.B, claimed || completed), "TWO_CLAIMS", "two claims with no intervening complete") property.cover(completed && RegEnable(completed, false.B, claimed || completed), "TWO_COMPLETES", "two completes with no intervening claim") val ep = enables(0).asUInt & pending.asUInt val ep2 = RegNext(ep) val diff = ep & ~ep2 property.cover((diff & (diff - 1.U)) =/= 0.U, "TWO_INTS_PENDING", "two enabled interrupts became pending on same cycle") if (nPriorities > 0) ccover(maxDevs(0) > (1.U << priority(0).getWidth) && maxDevs(0) <= Cat(1.U, threshold(0)), "THRESHOLD", "interrupt pending but less than threshold") } def ccover(cond: Bool, label: String, desc: String)(implicit sourceInfo: SourceInfo) = property.cover(cond, s"PLIC_$label", "Interrupts;;" + desc) } } class PLICFanIn(nDevices: Int, prioBits: Int) extends Module { val io = IO(new Bundle { val prio = Flipped(Vec(nDevices, UInt(prioBits.W))) val ip = Flipped(UInt(nDevices.W)) val dev = UInt(log2Ceil(nDevices+1).W) val max = UInt(prioBits.W) }) def findMax(x: Seq[UInt]): (UInt, UInt) = { if (x.length > 1) { val half = 1 << (log2Ceil(x.length) - 1) val left = findMax(x take half) val right = findMax(x drop half) MuxT(left._1 >= right._1, left, (right._1, half.U | right._2)) } else (x.head, 0.U) } val effectivePriority = (1.U << prioBits) +: (io.ip.asBools zip io.prio).map { case (p, x) => Cat(p, x) } val (maxPri, maxDev) = findMax(effectivePriority) io.max := maxPri // strips the always-constant high '1' bit io.dev := maxDev } /** Trait that will connect a PLIC to a subsystem */ trait CanHavePeripheryPLIC { this: BaseSubsystem => val (plicOpt, plicDomainOpt) = p(PLICKey).map { params => val tlbus = locateTLBusWrapper(p(PLICAttachKey).slaveWhere) val plicDomainWrapper = tlbus.generateSynchronousDomain("PLIC").suggestName("plic_domain") val plic = plicDomainWrapper { LazyModule(new TLPLIC(params, tlbus.beatBytes)) } plicDomainWrapper { plic.node := tlbus.coupleTo("plic") { TLFragmenter(tlbus, Some("PLIC")) := _ } } plicDomainWrapper { plic.intnode :=* ibus.toPLIC } (plic, plicDomainWrapper) }.unzip }
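Note: PLICFanIn selects the highest effective priority with a findMax reduction tree over Cat(pending, priority) per device, plus a constant leading entry (1 << prioBits) standing in for the reserved device 0, so that when nothing is pending the result is device 0 with priority 0 after the constant high bit is stripped. For the single-device, 1-bit-priority configuration emitted below, the tree collapses to a 2-bit comparison. A hedged instantiation sketch (the wrapper is illustrative only):

import chisel3._

// Illustrative wrapper (assumed name) around a one-device, 1-bit-priority fan-in.
class FanInWrapper extends Module {
  val io = IO(new Bundle {
    val prio = Input(Vec(1, UInt(1.W)))
    val ip   = Input(UInt(1.W))
    val dev  = Output(UInt(1.W))
    val max  = Output(UInt(1.W))
  })
  val fanin = Module(new PLICFanIn(nDevices = 1, prioBits = 1))
  fanin.io.prio := io.prio
  fanin.io.ip   := io.ip
  io.dev := fanin.io.dev
  io.max := fanin.io.max
}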
module PLICFanIn_7( // @[Plic.scala:338:7] input clock, // @[Plic.scala:338:7] input reset, // @[Plic.scala:338:7] input io_prio_0, // @[Plic.scala:339:14] input io_ip, // @[Plic.scala:339:14] output io_dev, // @[Plic.scala:339:14] output io_max // @[Plic.scala:339:14] ); wire io_prio_0_0 = io_prio_0; // @[Plic.scala:338:7] wire io_ip_0 = io_ip; // @[Plic.scala:338:7] wire [1:0] effectivePriority_0 = 2'h2; // @[Plic.scala:355:32] wire _effectivePriority_T = io_ip_0; // @[Plic.scala:338:7, :355:55] wire maxDev; // @[Misc.scala:35:36] wire io_dev_0; // @[Plic.scala:338:7] wire io_max_0; // @[Plic.scala:338:7] wire [1:0] effectivePriority_1 = {_effectivePriority_T, io_prio_0_0}; // @[Plic.scala:338:7, :355:{55,100}] wire [1:0] maxPri = effectivePriority_1 != 2'h3 ? 2'h2 : effectivePriority_1; // @[Misc.scala:35:9] assign maxDev = &effectivePriority_1; // @[Misc.scala:35:36] assign io_dev_0 = maxDev; // @[Misc.scala:35:36] assign io_max_0 = maxPri[0]; // @[Misc.scala:35:9] assign io_dev = io_dev_0; // @[Plic.scala:338:7] assign io_max = io_max_0; // @[Plic.scala:338:7] endmodule
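Note: in this single-device instance the reduction collapses as follows: device 1 wins only when its pending bit and its priority bit are both set (effective priority 2'b11 beats the constant 2'b10), giving io_dev = 1 and io_max = 1; otherwise the constant entry wins and both outputs are 0. An equivalent standalone sketch, with assumed names:

import chisel3._
import chisel3.util._

// Standalone sketch (assumed name) of the collapsed one-device comparison above.
class FanIn1Sketch extends Module {
  val io = IO(new Bundle {
    val prio = Input(UInt(1.W))
    val ip   = Input(UInt(1.W))
    val dev  = Output(UInt(1.W))
    val max  = Output(UInt(1.W))
  })
  val eff    = Cat(io.ip, io.prio)              // effective priority of device 1
  val win    = eff === 3.U                      // must beat the constant baseline 2'b10
  val winner = Mux(win, eff, "b10".U(2.W))      // findMax result
  io.dev := win
  io.max := winner(0)                           // strip the always-constant high bit
}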
Generate the Verilog code corresponding to the following Chisel files. File PE.scala: // See README.md for license details. package gemmini import chisel3._ import chisel3.util._ class PEControl[T <: Data : Arithmetic](accType: T) extends Bundle { val dataflow = UInt(1.W) // TODO make this an Enum val propagate = UInt(1.W) // Which register should be propagated (and which should be accumulated)? val shift = UInt(log2Up(accType.getWidth).W) // TODO this isn't correct for Floats } class MacUnit[T <: Data](inputType: T, cType: T, dType: T) (implicit ev: Arithmetic[T]) extends Module { import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(inputType) val in_c = Input(cType) val out_d = Output(dType) }) io.out_d := io.in_c.mac(io.in_a, io.in_b) } // TODO update documentation /** * A PE implementing a MAC operation. Configured as fully combinational when integrated into a Mesh. * @param width Data width of operands */ class PE[T <: Data](inputType: T, outputType: T, accType: T, df: Dataflow.Value, max_simultaneous_matmuls: Int) (implicit ev: Arithmetic[T]) extends Module { // Debugging variables import ev._ val io = IO(new Bundle { val in_a = Input(inputType) val in_b = Input(outputType) val in_d = Input(outputType) val out_a = Output(inputType) val out_b = Output(outputType) val out_c = Output(outputType) val in_control = Input(new PEControl(accType)) val out_control = Output(new PEControl(accType)) val in_id = Input(UInt(log2Up(max_simultaneous_matmuls).W)) val out_id = Output(UInt(log2Up(max_simultaneous_matmuls).W)) val in_last = Input(Bool()) val out_last = Output(Bool()) val in_valid = Input(Bool()) val out_valid = Output(Bool()) val bad_dataflow = Output(Bool()) }) val cType = if (df == Dataflow.WS) inputType else accType // When creating PEs that support multiple dataflows, the // elaboration/synthesis tools often fail to consolidate and de-duplicate // MAC units. To force mac circuitry to be re-used, we create a "mac_unit" // module here which just performs a single MAC operation val mac_unit = Module(new MacUnit(inputType, if (df == Dataflow.WS) outputType else accType, outputType)) val a = io.in_a val b = io.in_b val d = io.in_d val c1 = Reg(cType) val c2 = Reg(cType) val dataflow = io.in_control.dataflow val prop = io.in_control.propagate val shift = io.in_control.shift val id = io.in_id val last = io.in_last val valid = io.in_valid io.out_a := a io.out_control.dataflow := dataflow io.out_control.propagate := prop io.out_control.shift := shift io.out_id := id io.out_last := last io.out_valid := valid mac_unit.io.in_a := a val last_s = RegEnable(prop, valid) val flip = last_s =/= prop val shift_offset = Mux(flip, shift, 0.U) // Which dataflow are we using? val OUTPUT_STATIONARY = Dataflow.OS.id.U(1.W) val WEIGHT_STATIONARY = Dataflow.WS.id.U(1.W) // Is c1 being computed on, or propagated forward (in the output-stationary dataflow)? 
val COMPUTE = 0.U(1.W) val PROPAGATE = 1.U(1.W) io.bad_dataflow := false.B when ((df == Dataflow.OS).B || ((df == Dataflow.BOTH).B && dataflow === OUTPUT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := (c1 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 c2 := mac_unit.io.out_d c1 := d.withWidthOf(cType) }.otherwise { io.out_c := (c2 >> shift_offset).clippedToWidthOf(outputType) io.out_b := b mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c1 c1 := mac_unit.io.out_d c2 := d.withWidthOf(cType) } }.elsewhen ((df == Dataflow.WS).B || ((df == Dataflow.BOTH).B && dataflow === WEIGHT_STATIONARY)) { when(prop === PROPAGATE) { io.out_c := c1 mac_unit.io.in_b := c2.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c1 := d }.otherwise { io.out_c := c2 mac_unit.io.in_b := c1.asTypeOf(inputType) mac_unit.io.in_c := b io.out_b := mac_unit.io.out_d c2 := d } }.otherwise { io.bad_dataflow := true.B //assert(false.B, "unknown dataflow") io.out_c := DontCare io.out_b := DontCare mac_unit.io.in_b := b.asTypeOf(inputType) mac_unit.io.in_c := c2 } when (!valid) { c1 := c1 c2 := c2 mac_unit.io.in_b := DontCare mac_unit.io.in_c := DontCare } } File Arithmetic.scala: // A simple type class for Chisel datatypes that can add and multiply. To add your own type, simply create your own: // implicit MyTypeArithmetic extends Arithmetic[MyType] { ... } package gemmini import chisel3._ import chisel3.util._ import hardfloat._ // Bundles that represent the raw bits of custom datatypes case class Float(expWidth: Int, sigWidth: Int) extends Bundle { val bits = UInt((expWidth + sigWidth).W) val bias: Int = (1 << (expWidth-1)) - 1 } case class DummySInt(w: Int) extends Bundle { val bits = UInt(w.W) def dontCare: DummySInt = { val o = Wire(new DummySInt(w)) o.bits := 0.U o } } // The Arithmetic typeclass which implements various arithmetic operations on custom datatypes abstract class Arithmetic[T <: Data] { implicit def cast(t: T): ArithmeticOps[T] } abstract class ArithmeticOps[T <: Data](self: T) { def *(t: T): T def mac(m1: T, m2: T): T // Returns (m1 * m2 + self) def +(t: T): T def -(t: T): T def >>(u: UInt): T // This is a rounding shift! Rounds away from 0 def >(t: T): Bool def identity: T def withWidthOf(t: T): T def clippedToWidthOf(t: T): T // Like "withWidthOf", except that it saturates def relu: T def zero: T def minimum: T // Optional parameters, which only need to be defined if you want to enable various optimizations for transformers def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[T])] = None def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = None def mult_with_reciprocal[U <: Data](reciprocal: U) = self } object Arithmetic { implicit object UIntArithmetic extends Arithmetic[UInt] { override implicit def cast(self: UInt) = new ArithmeticOps(self) { override def *(t: UInt) = self * t override def mac(m1: UInt, m2: UInt) = m1 * m2 + self override def +(t: UInt) = self + t override def -(t: UInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? 
val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = point_five & (zeros | ones_digit) (self >> u).asUInt + r } override def >(t: UInt): Bool = self > t override def withWidthOf(t: UInt) = self.asTypeOf(t) override def clippedToWidthOf(t: UInt) = { val sat = ((1 << (t.getWidth-1))-1).U Mux(self > sat, sat, self)(t.getWidth-1, 0) } override def relu: UInt = self override def zero: UInt = 0.U override def identity: UInt = 1.U override def minimum: UInt = 0.U } } implicit object SIntArithmetic extends Arithmetic[SInt] { override implicit def cast(self: SInt) = new ArithmeticOps(self) { override def *(t: SInt) = self * t override def mac(m1: SInt, m2: SInt) = m1 * m2 + self override def +(t: SInt) = self + t override def -(t: SInt) = self - t override def >>(u: UInt) = { // The equation we use can be found here: https://riscv.github.io/documents/riscv-v-spec/#_vector_fixed_point_rounding_mode_register_vxrm // TODO Do we need to explicitly handle the cases where "u" is a small number (like 0)? What is the default behavior here? val point_five = Mux(u === 0.U, 0.U, self(u - 1.U)) val zeros = Mux(u <= 1.U, 0.U, self.asUInt & ((1.U << (u - 1.U)).asUInt - 1.U)) =/= 0.U val ones_digit = self(u) val r = (point_five & (zeros | ones_digit)).asBool (self >> u).asSInt + Mux(r, 1.S, 0.S) } override def >(t: SInt): Bool = self > t override def withWidthOf(t: SInt) = { if (self.getWidth >= t.getWidth) self(t.getWidth-1, 0).asSInt else { val sign_bits = t.getWidth - self.getWidth val sign = self(self.getWidth-1) Cat(Cat(Seq.fill(sign_bits)(sign)), self).asTypeOf(t) } } override def clippedToWidthOf(t: SInt): SInt = { val maxsat = ((1 << (t.getWidth-1))-1).S val minsat = (-(1 << (t.getWidth-1))).S MuxCase(self, Seq((self > maxsat) -> maxsat, (self < minsat) -> minsat))(t.getWidth-1, 0).asSInt } override def relu: SInt = Mux(self >= 0.S, self, 0.S) override def zero: SInt = 0.S override def identity: SInt = 1.S override def minimum: SInt = (-(1 << (self.getWidth-1))).S override def divider(denom_t: UInt, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(denom_t.cloneType)) val output = Wire(Decoupled(self.cloneType)) // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def sin_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def uin_to_float(x: UInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := x in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = 
sin_to_float(self) val denom_rec = uin_to_float(input.bits) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := self_rec divider.io.b := denom_rec divider.io.roundingMode := consts.round_minMag divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := float_to_in(divider.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def sqrt: Option[(DecoupledIO[UInt], DecoupledIO[SInt])] = { // TODO this uses a floating point divider, but we should use an integer divider instead val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(self.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider val expWidth = log2Up(self.getWidth) + 1 val sigWidth = self.getWidth def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_minMag // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag // consts.round_near_maxMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) // Instantiate the hardloat sqrt val sqrter = Module(new DivSqrtRecFN_small(expWidth, sigWidth, 0)) input.ready := sqrter.io.inReady sqrter.io.inValid := input.valid sqrter.io.sqrtOp := true.B sqrter.io.a := self_rec sqrter.io.b := DontCare sqrter.io.roundingMode := consts.round_minMag sqrter.io.detectTininess := consts.tininess_afterRounding output.valid := sqrter.io.outValid_sqrt output.bits := float_to_in(sqrter.io.out) assert(!output.valid || output.ready) Some((input, output)) } override def reciprocal[U <: Data](u: U, options: Int = 0): Option[(DecoupledIO[UInt], DecoupledIO[U])] = u match { case Float(expWidth, sigWidth) => val input = Wire(Decoupled(UInt(0.W))) val output = Wire(Decoupled(u.cloneType)) input.bits := DontCare // We translate our integer to floating-point form so that we can use the hardfloat divider def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } val self_rec = in_to_float(self) val one_rec = in_to_float(1.S) // Instantiate the hardloat divider val divider = Module(new DivSqrtRecFN_small(expWidth, sigWidth, options)) input.ready := divider.io.inReady divider.io.inValid := input.valid divider.io.sqrtOp := false.B divider.io.a := one_rec divider.io.b := self_rec divider.io.roundingMode := consts.round_near_even divider.io.detectTininess := consts.tininess_afterRounding output.valid := divider.io.outValid_div output.bits := fNFromRecFN(expWidth, sigWidth, divider.io.out).asTypeOf(u) assert(!output.valid || output.ready) Some((input, output)) case _ => None } override def mult_with_reciprocal[U <: Data](reciprocal: U): SInt = reciprocal match 
{ case recip @ Float(expWidth, sigWidth) => def in_to_float(x: SInt) = { val in_to_rec_fn = Module(new INToRecFN(intWidth = self.getWidth, expWidth, sigWidth)) in_to_rec_fn.io.signedIn := true.B in_to_rec_fn.io.in := x.asUInt in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding in_to_rec_fn.io.out } def float_to_in(x: UInt) = { val rec_fn_to_in = Module(new RecFNToIN(expWidth = expWidth, sigWidth, self.getWidth)) rec_fn_to_in.io.signedOut := true.B rec_fn_to_in.io.in := x rec_fn_to_in.io.roundingMode := consts.round_minMag rec_fn_to_in.io.out.asSInt } val self_rec = in_to_float(self) val reciprocal_rec = recFNFromFN(expWidth, sigWidth, recip.bits) // Instantiate the hardloat divider val muladder = Module(new MulRecFN(expWidth, sigWidth)) muladder.io.roundingMode := consts.round_near_even muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := reciprocal_rec float_to_in(muladder.io.out) case _ => self } } } implicit object FloatArithmetic extends Arithmetic[Float] { // TODO Floating point arithmetic currently switches between recoded and standard formats for every operation. However, it should stay in the recoded format as it travels through the systolic array override implicit def cast(self: Float): ArithmeticOps[Float] = new ArithmeticOps(self) { override def *(t: Float): Float = { val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := t_rec_resized val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def mac(m1: Float, m2: Float): Float = { // Recode all operands val m1_rec = recFNFromFN(m1.expWidth, m1.sigWidth, m1.bits) val m2_rec = recFNFromFN(m2.expWidth, m2.sigWidth, m2.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize m1 to self's width val m1_resizer = Module(new RecFNToRecFN(m1.expWidth, m1.sigWidth, self.expWidth, self.sigWidth)) m1_resizer.io.in := m1_rec m1_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m1_resizer.io.detectTininess := consts.tininess_afterRounding val m1_rec_resized = m1_resizer.io.out // Resize m2 to self's width val m2_resizer = Module(new RecFNToRecFN(m2.expWidth, m2.sigWidth, self.expWidth, self.sigWidth)) m2_resizer.io.in := m2_rec m2_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag m2_resizer.io.detectTininess := consts.tininess_afterRounding val m2_rec_resized = m2_resizer.io.out // Perform multiply-add val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := m1_rec_resized muladder.io.b := m2_rec_resized muladder.io.c := self_rec // Convert result 
to standard format // TODO remove these intermediate recodings val out = Wire(Float(self.expWidth, self.sigWidth)) out.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) out } override def +(t: Float): Float = { require(self.getWidth >= t.getWidth) // This just makes it easier to write the resizing code // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Generate 1 as a float val in_to_rec_fn = Module(new INToRecFN(1, self.expWidth, self.sigWidth)) in_to_rec_fn.io.signedIn := false.B in_to_rec_fn.io.in := 1.U in_to_rec_fn.io.roundingMode := consts.round_near_even // consts.round_near_maxMag in_to_rec_fn.io.detectTininess := consts.tininess_afterRounding val one_rec = in_to_rec_fn.io.out // Resize t val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out // Perform addition val muladder = Module(new MulAddRecFN(self.expWidth, self.sigWidth)) muladder.io.op := 0.U muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := t_rec_resized muladder.io.b := one_rec muladder.io.c := self_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def -(t: Float): Float = { val t_sgn = t.bits(t.getWidth-1) val neg_t = Cat(~t_sgn, t.bits(t.getWidth-2,0)).asTypeOf(t) self + neg_t } override def >>(u: UInt): Float = { // Recode self val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Get 2^(-u) as a recoded float val shift_exp = Wire(UInt(self.expWidth.W)) shift_exp := self.bias.U - u val shift_fn = Cat(0.U(1.W), shift_exp, 0.U((self.sigWidth-1).W)) val shift_rec = recFNFromFN(self.expWidth, self.sigWidth, shift_fn) assert(shift_exp =/= 0.U, "scaling by denormalized numbers is not currently supported") // Multiply self and 2^(-u) val muladder = Module(new MulRecFN(self.expWidth, self.sigWidth)) muladder.io.roundingMode := consts.round_near_even // consts.round_near_maxMag muladder.io.detectTininess := consts.tininess_afterRounding muladder.io.a := self_rec muladder.io.b := shift_rec val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := fNFromRecFN(self.expWidth, self.sigWidth, muladder.io.out) result } override def >(t: Float): Bool = { // Recode all operands val t_rec = recFNFromFN(t.expWidth, t.sigWidth, t.bits) val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) // Resize t to self's width val t_resizer = Module(new RecFNToRecFN(t.expWidth, t.sigWidth, self.expWidth, self.sigWidth)) t_resizer.io.in := t_rec t_resizer.io.roundingMode := consts.round_near_even t_resizer.io.detectTininess := consts.tininess_afterRounding val t_rec_resized = t_resizer.io.out val comparator = Module(new CompareRecFN(self.expWidth, self.sigWidth)) comparator.io.a := self_rec comparator.io.b := t_rec_resized comparator.io.signaling := false.B comparator.io.gt } override def withWidthOf(t: Float): Float = { val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even 
// consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def clippedToWidthOf(t: Float): Float = { // TODO check for overflow. Right now, we just assume that overflow doesn't happen val self_rec = recFNFromFN(self.expWidth, self.sigWidth, self.bits) val resizer = Module(new RecFNToRecFN(self.expWidth, self.sigWidth, t.expWidth, t.sigWidth)) resizer.io.in := self_rec resizer.io.roundingMode := consts.round_near_even // consts.round_near_maxMag resizer.io.detectTininess := consts.tininess_afterRounding val result = Wire(Float(t.expWidth, t.sigWidth)) result.bits := fNFromRecFN(t.expWidth, t.sigWidth, resizer.io.out) result } override def relu: Float = { val raw = rawFloatFromFN(self.expWidth, self.sigWidth, self.bits) val result = Wire(Float(self.expWidth, self.sigWidth)) result.bits := Mux(!raw.isZero && raw.sign, 0.U, self.bits) result } override def zero: Float = 0.U.asTypeOf(self) override def identity: Float = Cat(0.U(2.W), ~(0.U((self.expWidth-1).W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) override def minimum: Float = Cat(1.U, ~(0.U(self.expWidth.W)), 0.U((self.sigWidth-1).W)).asTypeOf(self) } } implicit object DummySIntArithmetic extends Arithmetic[DummySInt] { override implicit def cast(self: DummySInt) = new ArithmeticOps(self) { override def *(t: DummySInt) = self.dontCare override def mac(m1: DummySInt, m2: DummySInt) = self.dontCare override def +(t: DummySInt) = self.dontCare override def -(t: DummySInt) = self.dontCare override def >>(t: UInt) = self.dontCare override def >(t: DummySInt): Bool = false.B override def identity = self.dontCare override def withWidthOf(t: DummySInt) = self.dontCare override def clippedToWidthOf(t: DummySInt) = self.dontCare override def relu = self.dontCare override def zero = self.dontCare override def minimum: DummySInt = self.dontCare } } }
module MacUnit_111( // @[PE.scala:14:7] input clock, // @[PE.scala:14:7] input reset, // @[PE.scala:14:7] input [7:0] io_in_a, // @[PE.scala:16:14] input [7:0] io_in_b, // @[PE.scala:16:14] input [31:0] io_in_c, // @[PE.scala:16:14] output [19:0] io_out_d // @[PE.scala:16:14] ); wire [7:0] io_in_a_0 = io_in_a; // @[PE.scala:14:7] wire [7:0] io_in_b_0 = io_in_b; // @[PE.scala:14:7] wire [31:0] io_in_c_0 = io_in_c; // @[PE.scala:14:7] wire [19:0] io_out_d_0; // @[PE.scala:14:7] wire [15:0] _io_out_d_T = {{8{io_in_a_0[7]}}, io_in_a_0} * {{8{io_in_b_0[7]}}, io_in_b_0}; // @[PE.scala:14:7] wire [32:0] _io_out_d_T_1 = {{17{_io_out_d_T[15]}}, _io_out_d_T} + {io_in_c_0[31], io_in_c_0}; // @[PE.scala:14:7] wire [31:0] _io_out_d_T_2 = _io_out_d_T_1[31:0]; // @[Arithmetic.scala:93:54] wire [31:0] _io_out_d_T_3 = _io_out_d_T_2; // @[Arithmetic.scala:93:54] assign io_out_d_0 = _io_out_d_T_3[19:0]; // @[PE.scala:14:7, :23:12] assign io_out_d = io_out_d_0; // @[PE.scala:14:7] endmodule
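The module above is the SInt `mac` path specialized by the PE to 8-bit inputs, a 32-bit accumulator input, and a 20-bit result: the two operands are sign-extended and multiplied at 16 bits, the product is added to `io_in_c` at 33 bits, and the low 20 bits of the truncated sum drive `io_out_d`. A minimal plain-Scala model of that datapath is sketched below; the object and method names are illustrative only and are not part of the generated RTL.

object MacUnit111Model { // assumption: behavioural sketch of the emitted datapath, not the RTL itself
  def mac(a: Byte, b: Byte, c: Int): Int = {
    val product = a.toInt * b.toInt    // signed 8x8 multiply, matches the sign-extended 16-bit product
    val sum = product + c              // 32-bit wrap-around add, matches the [31:0] slice of the 33-bit sum
    sum & ((1 << 20) - 1)              // keep the low 20 bits, matches io_out_d := _io_out_d_T_3[19:0]
  }
}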
Generate the Verilog code corresponding to the following Chisel files. File ShiftReg.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ // Similar to the Chisel ShiftRegister but allows the user to suggest a // name to the registers that get instantiated, and // to provide a reset value. object ShiftRegInit { def apply[T <: Data](in: T, n: Int, init: T, name: Option[String] = None): T = (0 until n).foldRight(in) { case (i, next) => { val r = RegNext(next, init) name.foreach { na => r.suggestName(s"${na}_${i}") } r } } } /** These wrap behavioral * shift registers into specific modules to allow for * backend flows to replace or constrain * them properly when used for CDC synchronization, * rather than buffering. * * The different types vary in their reset behavior: * AsyncResetShiftReg -- Asynchronously reset register array * A W(width) x D(depth) sized array is constructed from D instantiations of a * W-wide register vector. Functionally identical to AsyncResetSyncrhonizerShiftReg, * but only used for timing applications */ abstract class AbstractPipelineReg(w: Int = 1) extends Module { val io = IO(new Bundle { val d = Input(UInt(w.W)) val q = Output(UInt(w.W)) } ) } object AbstractPipelineReg { def apply [T <: Data](gen: => AbstractPipelineReg, in: T, name: Option[String] = None): T = { val chain = Module(gen) name.foreach{ chain.suggestName(_) } chain.io.d := in.asUInt chain.io.q.asTypeOf(in) } } class AsyncResetShiftReg(w: Int = 1, depth: Int = 1, init: Int = 0, name: String = "pipe") extends AbstractPipelineReg(w) { require(depth > 0, "Depth must be greater than 0.") override def desiredName = s"AsyncResetShiftReg_w${w}_d${depth}_i${init}" val chain = List.tabulate(depth) { i => Module (new AsyncResetRegVec(w, init)).suggestName(s"${name}_${i}") } chain.last.io.d := io.d chain.last.io.en := true.B (chain.init zip chain.tail).foreach { case (sink, source) => sink.io.d := source.io.q sink.io.en := true.B } io.q := chain.head.io.q } object AsyncResetShiftReg { def apply [T <: Data](in: T, depth: Int, init: Int = 0, name: Option[String] = None): T = AbstractPipelineReg(new AsyncResetShiftReg(in.getWidth, depth, init), in, name) def apply [T <: Data](in: T, depth: Int, name: Option[String]): T = apply(in, depth, 0, name) def apply [T <: Data](in: T, depth: Int, init: T, name: Option[String]): T = apply(in, depth, init.litValue.toInt, name) def apply [T <: Data](in: T, depth: Int, init: T): T = apply (in, depth, init.litValue.toInt, None) } File AsyncQueue.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.util import chisel3._ import chisel3.util._ case class AsyncQueueParams( depth: Int = 8, sync: Int = 3, safe: Boolean = true, // If safe is true, then effort is made to resynchronize the crossing indices when either side is reset. // This makes it safe/possible to reset one side of the crossing (but not the other) when the queue is empty. narrow: Boolean = false) // If narrow is true then the read mux is moved to the source side of the crossing. // This reduces the number of level shifters in the case where the clock crossing is also a voltage crossing, // at the expense of a combinational path from the sink to the source and back to the sink. { require (depth > 0 && isPow2(depth)) require (sync >= 2) val bits = log2Ceil(depth) val wires = if (narrow) 1 else depth } object AsyncQueueParams { // When there is only one entry, we don't need narrow. 
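  // Example (illustrative): AsyncQueueParams(depth = 8) gives bits = log2Ceil(8) = 3 and wires = 8,
  // i.e. one mem wire per entry crosses the clock domain; with narrow = true only wires = 1 crosses and
  // the read mux moves to the source side, at the cost of the sink->source->sink combinational path noted above.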
def singleton(sync: Int = 3, safe: Boolean = true) = AsyncQueueParams(1, sync, safe, false) } class AsyncBundleSafety extends Bundle { val ridx_valid = Input (Bool()) val widx_valid = Output(Bool()) val source_reset_n = Output(Bool()) val sink_reset_n = Input (Bool()) } class AsyncBundle[T <: Data](private val gen: T, val params: AsyncQueueParams = AsyncQueueParams()) extends Bundle { // Data-path synchronization val mem = Output(Vec(params.wires, gen)) val ridx = Input (UInt((params.bits+1).W)) val widx = Output(UInt((params.bits+1).W)) val index = params.narrow.option(Input(UInt(params.bits.W))) // Signals used to self-stabilize a safe AsyncQueue val safe = params.safe.option(new AsyncBundleSafety) } object GrayCounter { def apply(bits: Int, increment: Bool = true.B, clear: Bool = false.B, name: String = "binary"): UInt = { val incremented = Wire(UInt(bits.W)) val binary = RegNext(next=incremented, init=0.U).suggestName(name) incremented := Mux(clear, 0.U, binary + increment.asUInt) incremented ^ (incremented >> 1) } } class AsyncValidSync(sync: Int, desc: String) extends RawModule { val io = IO(new Bundle { val in = Input(Bool()) val out = Output(Bool()) }) val clock = IO(Input(Clock())) val reset = IO(Input(AsyncReset())) withClockAndReset(clock, reset){ io.out := AsyncResetSynchronizerShiftReg(io.in, sync, Some(desc)) } } class AsyncQueueSource[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSource_${gen.typeName}" val io = IO(new Bundle { // These come from the source domain val enq = Flipped(Decoupled(gen)) // These cross to the sink clock domain val async = new AsyncBundle(gen, params) }) val bits = params.bits val sink_ready = WireInit(true.B) val mem = Reg(Vec(params.depth, gen)) // This does NOT need to be reset at all. 
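  // widx below is a Gray-coded write pointer: GrayCounter returns incremented ^ (incremented >> 1), so
  // consecutive counts differ in exactly one bit (binary 0,1,2,3,4 -> Gray 000,001,011,010,110), which is
  // what makes it safe to resynchronize the pointer bit-by-bit into the sink clock domain.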
val widx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.enq.fire, !sink_ready, "widx_bin")) val ridx = AsyncResetSynchronizerShiftReg(io.async.ridx, params.sync, Some("ridx_gray")) val ready = sink_ready && widx =/= (ridx ^ (params.depth | params.depth >> 1).U) val index = if (bits == 0) 0.U else io.async.widx(bits-1, 0) ^ (io.async.widx(bits, bits) << (bits-1)) when (io.enq.fire) { mem(index) := io.enq.bits } val ready_reg = withReset(reset.asAsyncReset)(RegNext(next=ready, init=false.B).suggestName("ready_reg")) io.enq.ready := ready_reg && sink_ready val widx_reg = withReset(reset.asAsyncReset)(RegNext(next=widx, init=0.U).suggestName("widx_gray")) io.async.widx := widx_reg io.async.index match { case Some(index) => io.async.mem(0) := mem(index) case None => io.async.mem := mem } io.async.safe.foreach { sio => val source_valid_0 = Module(new AsyncValidSync(params.sync, "source_valid_0")) val source_valid_1 = Module(new AsyncValidSync(params.sync, "source_valid_1")) val sink_extend = Module(new AsyncValidSync(params.sync, "sink_extend")) val sink_valid = Module(new AsyncValidSync(params.sync, "sink_valid")) source_valid_0.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset source_valid_1.reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_extend .reset := (reset.asBool || !sio.sink_reset_n).asAsyncReset sink_valid .reset := reset.asAsyncReset source_valid_0.clock := clock source_valid_1.clock := clock sink_extend .clock := clock sink_valid .clock := clock source_valid_0.io.in := true.B source_valid_1.io.in := source_valid_0.io.out sio.widx_valid := source_valid_1.io.out sink_extend.io.in := sio.ridx_valid sink_valid.io.in := sink_extend.io.out sink_ready := sink_valid.io.out sio.source_reset_n := !reset.asBool // Assert that if there is stuff in the queue, then reset cannot happen // Impossible to write because dequeue can occur on the receiving side, // then reset allowed to happen, but write side cannot know that dequeue // occurred. 
// TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // assert (!(reset || !sio.sink_reset_n) || !io.enq.valid, "Enqueue while sink is reset and AsyncQueueSource is unprotected") // assert (!reset_rise || prev_idx_match.asBool, "Sink reset while AsyncQueueSource not empty") } } class AsyncQueueSink[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Module { override def desiredName = s"AsyncQueueSink_${gen.typeName}" val io = IO(new Bundle { // These come from the sink domain val deq = Decoupled(gen) // These cross to the source clock domain val async = Flipped(new AsyncBundle(gen, params)) }) val bits = params.bits val source_ready = WireInit(true.B) val ridx = withReset(reset.asAsyncReset)(GrayCounter(bits+1, io.deq.fire, !source_ready, "ridx_bin")) val widx = AsyncResetSynchronizerShiftReg(io.async.widx, params.sync, Some("widx_gray")) val valid = source_ready && ridx =/= widx // The mux is safe because timing analysis ensures ridx has reached the register // On an ASIC, changes to the unread location cannot affect the selected value // On an FPGA, only one input changes at a time => mem updates don't cause glitches // The register only latches when the selected valued is not being written val index = if (bits == 0) 0.U else ridx(bits-1, 0) ^ (ridx(bits, bits) << (bits-1)) io.async.index.foreach { _ := index } // This register does not NEED to be reset, as its contents will not // be considered unless the asynchronously reset deq valid register is set. // It is possible that bits latches when the source domain is reset / has power cut // This is safe, because isolation gates brought mem low before the zeroed widx reached us val deq_bits_nxt = io.async.mem(if (params.narrow) 0.U else index) io.deq.bits := ClockCrossingReg(deq_bits_nxt, en = valid, doInit = false, name = Some("deq_bits_reg")) val valid_reg = withReset(reset.asAsyncReset)(RegNext(next=valid, init=false.B).suggestName("valid_reg")) io.deq.valid := valid_reg && source_ready val ridx_reg = withReset(reset.asAsyncReset)(RegNext(next=ridx, init=0.U).suggestName("ridx_gray")) io.async.ridx := ridx_reg io.async.safe.foreach { sio => val sink_valid_0 = Module(new AsyncValidSync(params.sync, "sink_valid_0")) val sink_valid_1 = Module(new AsyncValidSync(params.sync, "sink_valid_1")) val source_extend = Module(new AsyncValidSync(params.sync, "source_extend")) val source_valid = Module(new AsyncValidSync(params.sync, "source_valid")) sink_valid_0 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset sink_valid_1 .reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_extend.reset := (reset.asBool || !sio.source_reset_n).asAsyncReset source_valid .reset := reset.asAsyncReset sink_valid_0 .clock := clock sink_valid_1 .clock := clock source_extend.clock := clock source_valid .clock := clock sink_valid_0.io.in := true.B sink_valid_1.io.in := sink_valid_0.io.out sio.ridx_valid := sink_valid_1.io.out source_extend.io.in := sio.widx_valid source_valid.io.in := source_extend.io.out source_ready := source_valid.io.out sio.sink_reset_n := !reset.asBool // TODO: write some sort of sanity check assertion for users // that denote don't reset when there is activity // // val reset_and_extend = !source_ready || !sio.source_reset_n || reset.asBool // val reset_and_extend_prev = RegNext(reset_and_extend, true.B) // val reset_rise = !reset_and_extend_prev && reset_and_extend // val prev_idx_match = 
AsyncResetReg(updateData=(io.async.widx===io.async.ridx), resetData=0) // assert (!reset_rise || prev_idx_match.asBool, "Source reset while AsyncQueueSink not empty") } } object FromAsyncBundle { // Sometimes it makes sense for the sink to have different sync than the source def apply[T <: Data](x: AsyncBundle[T]): DecoupledIO[T] = apply(x, x.params.sync) def apply[T <: Data](x: AsyncBundle[T], sync: Int): DecoupledIO[T] = { val sink = Module(new AsyncQueueSink(chiselTypeOf(x.mem(0)), x.params.copy(sync = sync))) sink.io.async <> x sink.io.deq } } object ToAsyncBundle { def apply[T <: Data](x: ReadyValidIO[T], params: AsyncQueueParams = AsyncQueueParams()): AsyncBundle[T] = { val source = Module(new AsyncQueueSource(chiselTypeOf(x.bits), params)) source.io.enq <> x source.io.async } } class AsyncQueue[T <: Data](gen: T, params: AsyncQueueParams = AsyncQueueParams()) extends Crossing[T] { val io = IO(new CrossingIO(gen)) val source = withClockAndReset(io.enq_clock, io.enq_reset) { Module(new AsyncQueueSource(gen, params)) } val sink = withClockAndReset(io.deq_clock, io.deq_reset) { Module(new AsyncQueueSink (gen, params)) } source.io.enq <> io.enq io.deq <> sink.io.deq sink.io.async <> source.io.async } File LazyModuleImp.scala: package org.chipsalliance.diplomacy.lazymodule import chisel3.{withClockAndReset, Module, RawModule, Reset, _} import chisel3.experimental.{ChiselAnnotation, CloneModuleAsRecord, SourceInfo} import firrtl.passes.InlineAnnotation import org.chipsalliance.cde.config.Parameters import org.chipsalliance.diplomacy.nodes.Dangle import scala.collection.immutable.SortedMap /** Trait describing the actual [[Module]] implementation wrapped by a [[LazyModule]]. * * This is the actual Chisel module that is lazily-evaluated in the second phase of Diplomacy. */ sealed trait LazyModuleImpLike extends RawModule { /** [[LazyModule]] that contains this instance. */ val wrapper: LazyModule /** IOs that will be automatically "punched" for this instance. */ val auto: AutoBundle /** The metadata that describes the [[HalfEdge]]s which generated [[auto]]. */ protected[diplomacy] val dangles: Seq[Dangle] // [[wrapper.module]] had better not be accessed while LazyModules are still being built! require( LazyModule.scope.isEmpty, s"${wrapper.name}.module was constructed before LazyModule() was run on ${LazyModule.scope.get.name}" ) /** Set module name. Defaults to the containing LazyModule's desiredName. */ override def desiredName: String = wrapper.desiredName suggestName(wrapper.suggestedName) /** [[Parameters]] for chisel [[Module]]s. */ implicit val p: Parameters = wrapper.p /** instantiate this [[LazyModule]], return [[AutoBundle]] and a unconnected [[Dangle]]s from this module and * submodules. */ protected[diplomacy] def instantiate(): (AutoBundle, List[Dangle]) = { // 1. It will recursively append [[wrapper.children]] into [[chisel3.internal.Builder]], // 2. return [[Dangle]]s from each module. 
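    // Summary of the flow below: collect Dangles from children and nodes, connect any source/sink pair
    // whose two halves both end up inside this module, and expose every unpaired Dangle as a field of the
    // auto IO bundle so the parent LazyModule can complete the connection.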
val childDangles = wrapper.children.reverse.flatMap { c => implicit val sourceInfo: SourceInfo = c.info c.cloneProto.map { cp => // If the child is a clone, then recursively set cloneProto of its children as well def assignCloneProtos(bases: Seq[LazyModule], clones: Seq[LazyModule]): Unit = { require(bases.size == clones.size) (bases.zip(clones)).map { case (l, r) => require(l.getClass == r.getClass, s"Cloned children class mismatch ${l.name} != ${r.name}") l.cloneProto = Some(r) assignCloneProtos(l.children, r.children) } } assignCloneProtos(c.children, cp.children) // Clone the child module as a record, and get its [[AutoBundle]] val clone = CloneModuleAsRecord(cp.module).suggestName(c.suggestedName) val clonedAuto = clone("auto").asInstanceOf[AutoBundle] // Get the empty [[Dangle]]'s of the cloned child val rawDangles = c.cloneDangles() require(rawDangles.size == clonedAuto.elements.size) // Assign the [[AutoBundle]] fields of the cloned record to the empty [[Dangle]]'s val dangles = (rawDangles.zip(clonedAuto.elements)).map { case (d, (_, io)) => d.copy(dataOpt = Some(io)) } dangles }.getOrElse { // For non-clones, instantiate the child module val mod = try { Module(c.module) } catch { case e: ChiselException => { println(s"Chisel exception caught when instantiating ${c.name} within ${this.name} at ${c.line}") throw e } } mod.dangles } } // Ask each node in this [[LazyModule]] to call [[BaseNode.instantiate]]. // This will result in a sequence of [[Dangle]] from these [[BaseNode]]s. val nodeDangles = wrapper.nodes.reverse.flatMap(_.instantiate()) // Accumulate all the [[Dangle]]s from this node and any accumulated from its [[wrapper.children]] val allDangles = nodeDangles ++ childDangles // Group [[allDangles]] by their [[source]]. val pairing = SortedMap(allDangles.groupBy(_.source).toSeq: _*) // For each [[source]] set of [[Dangle]]s of size 2, ensure that these // can be connected as a source-sink pair (have opposite flipped value). // Make the connection and mark them as [[done]]. val done = Set() ++ pairing.values.filter(_.size == 2).map { case Seq(a, b) => require(a.flipped != b.flipped) // @todo <> in chisel3 makes directionless connection. if (a.flipped) { a.data <> b.data } else { b.data <> a.data } a.source case _ => None } // Find all [[Dangle]]s which are still not connected. These will end up as [[AutoBundle]] [[IO]] ports on the module. val forward = allDangles.filter(d => !done(d.source)) // Generate [[AutoBundle]] IO from [[forward]]. val auto = IO(new AutoBundle(forward.map { d => (d.name, d.data, d.flipped) }: _*)) // Pass the [[Dangle]]s which remained and were used to generate the [[AutoBundle]] I/O ports up to the [[parent]] [[LazyModule]] val dangles = (forward.zip(auto.elements)).map { case (d, (_, io)) => if (d.flipped) { d.data <> io } else { io <> d.data } d.copy(dataOpt = Some(io), name = wrapper.suggestedName + "_" + d.name) } // Push all [[LazyModule.inModuleBody]] to [[chisel3.internal.Builder]]. wrapper.inModuleBody.reverse.foreach { _() } if (wrapper.shouldBeInlined) { chisel3.experimental.annotate(new ChiselAnnotation { def toFirrtl = InlineAnnotation(toNamed) }) } // Return [[IO]] and [[Dangle]] of this [[LazyModuleImp]]. (auto, dangles) } } /** Actual description of a [[Module]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyModuleImp(val wrapper: LazyModule) extends Module with LazyModuleImpLike { /** Instantiate hardware of this `Module`. 
*/ val (auto, dangles) = instantiate() } /** Actual description of a [[RawModule]] which can be instantiated by a call to [[LazyModule.module]]. * * @param wrapper * the [[LazyModule]] from which the `.module` call is being made. */ class LazyRawModuleImp(val wrapper: LazyModule) extends RawModule with LazyModuleImpLike { // These wires are the default clock+reset for all LazyModule children. // It is recommended to drive these even if you manually drive the [[clock]] and [[reset]] of all of the // [[LazyRawModuleImp]] children. // Otherwise, anonymous children ([[Monitor]]s for example) will not have their [[clock]] and/or [[reset]] driven properly. /** drive clock explicitly. */ val childClock: Clock = Wire(Clock()) /** drive reset explicitly. */ val childReset: Reset = Wire(Reset()) // the default is that these are disabled childClock := false.B.asClock childReset := chisel3.DontCare def provideImplicitClockToLazyChildren: Boolean = false val (auto, dangles) = if (provideImplicitClockToLazyChildren) { withClockAndReset(childClock, childReset) { instantiate() } } else { instantiate() } } File Debug.scala: // See LICENSE.SiFive for license details. package freechips.rocketchip.devices.debug import chisel3._ import chisel3.util._ import org.chipsalliance.cde.config._ import org.chipsalliance.diplomacy.lazymodule._ import freechips.rocketchip.amba.apb.{APBFanout, APBToTL} import freechips.rocketchip.devices.debug.systembusaccess.{SBToTL, SystemBusAccessModule} import freechips.rocketchip.devices.tilelink.{DevNullParams, TLBusBypass, TLError} import freechips.rocketchip.diplomacy.{AddressSet, BufferParams} import freechips.rocketchip.resources.{Description, Device, Resource, ResourceBindings, ResourceString, SimpleDevice} import freechips.rocketchip.interrupts.{IntNexusNode, IntSinkParameters, IntSinkPortParameters, IntSourceParameters, IntSourcePortParameters, IntSyncCrossingSource, IntSyncIdentityNode} import freechips.rocketchip.regmapper.{RegField, RegFieldAccessType, RegFieldDesc, RegFieldGroup, RegFieldWrType, RegReadFn, RegWriteFn} import freechips.rocketchip.rocket.{CSRs, Instructions} import freechips.rocketchip.tile.MaxHartIdBits import freechips.rocketchip.tilelink.{TLAsyncCrossingSink, TLAsyncCrossingSource, TLBuffer, TLRegisterNode, TLXbar} import freechips.rocketchip.util.{Annotated, AsyncBundle, AsyncQueueParams, AsyncResetSynchronizerShiftReg, FromAsyncBundle, ParameterizedBundle, ResetSynchronizerShiftReg, ToAsyncBundle} import freechips.rocketchip.util.SeqBoolBitwiseOps import freechips.rocketchip.util.SeqToAugmentedSeq import freechips.rocketchip.util.BooleanToAugmentedBoolean object DsbBusConsts { def sbAddrWidth = 12 def sbIdWidth = 10 } object DsbRegAddrs{ // These are used by the ROM. def HALTED = 0x100 def GOING = 0x104 def RESUMING = 0x108 def EXCEPTION = 0x10C def WHERETO = 0x300 // This needs to be aligned for up to lq/sq // This shows up in HartInfo, and needs to be aligned // to enable up to LQ/SQ instructions. def DATA = 0x380 // We want DATA to immediately follow PROGBUF so that we can // use them interchangeably. Leave another slot if there is an // implicit ebreak. def PROGBUF(cfg:DebugModuleParams) = { val tmp = DATA - (cfg.nProgramBufferWords * 4) if (cfg.hasImplicitEbreak) (tmp - 4) else tmp } // This is unused if hasImpEbreak is false, and just points to the end of the PROGBUF. def IMPEBREAK(cfg: DebugModuleParams) = { DATA - 4 } // We want abstract to be immediately before PROGBUF // because we auto-generate 2 (or 5) instructions. 
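  // Worked example with the default DebugModuleParams (baseAddress = 0, nProgramBufferWords = 16, so
  // nAbstractInstructions = 2): PROGBUF = 0x380 - 16*4 = 0x340 and ABSTRACT (below) = 0x340 - 2*4 = 0x338,
  // which places the auto-generated abstract instructions immediately before PROGBUF as required above.
  // (Illustrative arithmetic only.)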
def ABSTRACT(cfg:DebugModuleParams) = PROGBUF(cfg) - (cfg.nAbstractInstructions * 4) def FLAGS = 0x400 def ROMBASE = 0x800 } /** Enumerations used both in the hardware * and in the configuration specification. */ object DebugModuleAccessType extends scala.Enumeration { type DebugModuleAccessType = Value val Access8Bit, Access16Bit, Access32Bit, Access64Bit, Access128Bit = Value } object DebugAbstractCommandError extends scala.Enumeration { type DebugAbstractCommandError = Value val Success, ErrBusy, ErrNotSupported, ErrException, ErrHaltResume = Value } object DebugAbstractCommandType extends scala.Enumeration { type DebugAbstractCommandType = Value val AccessRegister, QuickAccess = Value } /** Parameters exposed to the top-level design, set based on * external requirements, etc. * * This object checks that the parameters conform to the * full specification. The implementation which receives this * object can perform more checks on what that implementation * actually supports. * @param nComponents Number of components to support debugging. * @param baseAddress Base offest for debugEntry and debugException * @param nDMIAddrSize Size of the Debug Bus Address * @param nAbstractDataWords Number of 32-bit words for Abstract Commands * @param nProgamBufferWords Number of 32-bit words for Program Buffer * @param hasBusMaster Whether or not a bus master should be included * @param clockGate Whether or not to use dmactive as the clockgate for debug module * @param maxSupportedSBAccess Maximum transaction size supported by System Bus Access logic. * @param supportQuickAccess Whether or not to support the quick access command. * @param supportHartArray Whether or not to implement the hart array register (if >1 hart). * @param nHaltGroups Number of halt groups * @param nExtTriggers Number of external triggers * @param hasHartResets Feature to reset all the currently selected harts * @param hasImplicitEbreak There is an additional RO program buffer word containing an ebreak * @param crossingHasSafeReset Include "safe" logic in Async Crossings so that only one side needs to be reset. */ case class DebugModuleParams ( baseAddress : BigInt = BigInt(0), nDMIAddrSize : Int = 7, nProgramBufferWords: Int = 16, nAbstractDataWords : Int = 4, nScratch : Int = 1, hasBusMaster : Boolean = false, clockGate : Boolean = true, maxSupportedSBAccess : Int = 32, supportQuickAccess : Boolean = false, supportHartArray : Boolean = true, nHaltGroups : Int = 1, nExtTriggers : Int = 0, hasHartResets : Boolean = false, hasImplicitEbreak : Boolean = false, hasAuthentication : Boolean = false, crossingHasSafeReset : Boolean = true ) { require ((nDMIAddrSize >= 7) && (nDMIAddrSize <= 32), s"Legal DMIAddrSize is 7-32, not ${nDMIAddrSize}") require ((nAbstractDataWords > 0) && (nAbstractDataWords <= 16), s"Legal nAbstractDataWords is 0-16, not ${nAbstractDataWords}") require ((nProgramBufferWords >= 0) && (nProgramBufferWords <= 16), s"Legal nProgramBufferWords is 0-16, not ${nProgramBufferWords}") require (nHaltGroups < 32, s"Legal nHaltGroups is 0-31, not ${nHaltGroups}") require (nExtTriggers <= 16, s"Legal nExtTriggers is 0-16, not ${nExtTriggers}") if (supportQuickAccess) { // TODO: Check that quick access requirements are met. 
} def address = AddressSet(baseAddress, 0xFFF) /** the base address of DM */ def atzero = (baseAddress == 0) /** The number of generated instructions * * When the base address is not zero, we need more instruction also, * more dscratch registers) to load/store memory mapped data register * because they may no longer be directly addressible with x0 + 12-bit imm */ def nAbstractInstructions = if (atzero) 2 else 5 def debugEntry: BigInt = baseAddress + 0x800 def debugException: BigInt = baseAddress + 0x808 def nDscratch: Int = if (atzero) 1 else 2 } object DefaultDebugModuleParams { def apply(xlen:Int /*TODO , val configStringAddr: Int*/): DebugModuleParams = { new DebugModuleParams().copy( nAbstractDataWords = (if (xlen == 32) 1 else if (xlen == 64) 2 else 4), maxSupportedSBAccess = xlen ) } } case object DebugModuleKey extends Field[Option[DebugModuleParams]](Some(DebugModuleParams())) /** Functional parameters exposed to the design configuration. * * hartIdToHartSel: For systems where hart ids are not 1:1 with hartsel, provide the mapping. * hartSelToHartId: Provide inverse mapping of the above */ case class DebugModuleHartSelFuncs ( hartIdToHartSel : (UInt) => UInt = (x:UInt) => x, hartSelToHartId : (UInt) => UInt = (x:UInt) => x ) case object DebugModuleHartSelKey extends Field(DebugModuleHartSelFuncs()) class DebugExtTriggerOut (val nExtTriggers: Int) extends Bundle { val req = Output(UInt(nExtTriggers.W)) val ack = Input(UInt(nExtTriggers.W)) } class DebugExtTriggerIn (val nExtTriggers: Int) extends Bundle { val req = Input(UInt(nExtTriggers.W)) val ack = Output(UInt(nExtTriggers.W)) } class DebugExtTriggerIO () (implicit val p: Parameters) extends ParameterizedBundle()(p) { val out = new DebugExtTriggerOut(p(DebugModuleKey).get.nExtTriggers) val in = new DebugExtTriggerIn (p(DebugModuleKey).get.nExtTriggers) } class DebugAuthenticationIO () (implicit val p: Parameters) extends ParameterizedBundle()(p) { val dmactive = Output(Bool()) val dmAuthWrite = Output(Bool()) val dmAuthRead = Output(Bool()) val dmAuthWdata = Output(UInt(32.W)) val dmAuthBusy = Input(Bool()) val dmAuthRdata = Input(UInt(32.W)) val dmAuthenticated = Input(Bool()) } // ***************************************** // Module Interfaces // // ***************************************** /** Control signals for Inner, generated in Outer * {{{ * run control: resumreq, ackhavereset, halt-on-reset mask * hart select: hasel, hartsel and the hart array mask * }}} */ class DebugInternalBundle (val nComponents: Int)(implicit val p: Parameters) extends ParameterizedBundle()(p) { /** resume request */ val resumereq = Bool() /** hart select */ val hartsel = UInt(10.W) /** reset acknowledge */ val ackhavereset = Bool() /** hart array enable */ val hasel = Bool() /** hart array mask */ val hamask = Vec(nComponents, Bool()) /** halt-on-reset mask */ val hrmask = Vec(nComponents, Bool()) } /** structure for top-level Debug Module signals which aren't the bus interfaces. 
*/ class DebugCtrlBundle (nComponents: Int)(implicit val p: Parameters) extends ParameterizedBundle()(p) { /** debug availability status for all harts */ val debugUnavail = Input(Vec(nComponents, Bool())) /** reset signal * * for every part of the hardware platform, * including every hart, except for the DM and any * logic required to access the DM */ val ndreset = Output(Bool()) /** reset signal for the DM itself */ val dmactive = Output(Bool()) /** dmactive acknowlege */ val dmactiveAck = Input(Bool()) } // ***************************************** // Debug Module // // ***************************************** /** Parameterized version of the Debug Module defined in the * RISC-V Debug Specification * * DebugModule is a slave to two asynchronous masters: * The Debug Bus (DMI) -- This is driven by an external debugger * * The System Bus -- This services requests from the cores. Generally * this interface should only be active at the request * of the debugger, but the Debug Module may also * provide the default MTVEC since it is mapped * to address 0x0. * * DebugModule is responsible for control registers and RAM, and * Debug ROM. It runs partially off of the dmiClk (e.g. TCK) and * the TL clock. Therefore, it is divided into "Outer" portion (running * off dmiClock and dmiReset) and "Inner" (running off tl_clock and tl_reset). * This allows DMCONTROL.haltreq, hartsel, hasel, hawindowsel, hawindow, dmactive, * and ndreset to be modified even while the Core is in reset or not being clocked. * Not all reads from the Debugger to the Debug Module will actually complete * in these scenarios either, they will just block until tl_clock and tl_reset * allow them to complete. This is not strictly necessary for * proper debugger functionality. */ // Local reg mapper function : Notify when written, but give the value as well. object WNotifyWire { def apply(n: Int, value: UInt, set: Bool, name: String, desc: String) : RegField = { RegField(n, 0.U, RegWriteFn((valid, data) => { set := valid value := data true.B }), Some(RegFieldDesc(name = name, desc = desc, access = RegFieldAccessType.W))) } } // Local reg mapper function : Notify when accessed either as read or write. object RWNotify { def apply (n: Int, rVal: UInt, wVal: UInt, rNotify: Bool, wNotify: Bool, desc: Option[RegFieldDesc] = None): RegField = { RegField(n, RegReadFn ((ready) => {rNotify := ready ; (true.B, rVal)}), RegWriteFn((valid, data) => { wNotify := valid when (valid) {wVal := data} true.B } ), desc) } } // Local reg mapper function : Notify with value when written, take read input as presented. // This allows checking or correcting the write value before storing it in the register field. 
object WNotifyVal { def apply(n: Int, rVal: UInt, wVal: UInt, wNotify: Bool, desc: RegFieldDesc): RegField = { RegField(n, rVal, RegWriteFn((valid, data) => { wNotify := valid wVal := data true.B } ), desc) } } class TLDebugModuleOuter(device: Device)(implicit p: Parameters) extends LazyModule { // For Shorter Register Names import DMI_RegAddrs._ val cfg = p(DebugModuleKey).get val intnode = IntNexusNode( sourceFn = { _ => IntSourcePortParameters(Seq(IntSourceParameters(1, Seq(Resource(device, "int"))))) }, sinkFn = { _ => IntSinkPortParameters(Seq(IntSinkParameters())) }, outputRequiresInput = false) val dmiNode = TLRegisterNode ( address = AddressSet.misaligned(DMI_DMCONTROL << 2, 4) ++ AddressSet.misaligned(DMI_HARTINFO << 2, 4) ++ AddressSet.misaligned(DMI_HAWINDOWSEL << 2, 4) ++ AddressSet.misaligned(DMI_HAWINDOW << 2, 4), device = device, beatBytes = 4, executable = false ) lazy val module = new Impl class Impl extends LazyModuleImp(this) { require (intnode.edges.in.size == 0, "Debug Module does not accept interrupts") val nComponents = intnode.out.size def getNComponents = () => nComponents val supportHartArray = cfg.supportHartArray && (nComponents > 1) // no hart array if only one hart val io = IO(new Bundle { /** structure for top-level Debug Module signals which aren't the bus interfaces. */ val ctrl = (new DebugCtrlBundle(nComponents)) /** control signals for Inner, generated in Outer */ val innerCtrl = new DecoupledIO(new DebugInternalBundle(nComponents)) /** debug interruption from Inner to Outer * * contains 2 type of debug interruption causes: * - halt group * - halt-on-reset */ val hgDebugInt = Input(Vec(nComponents, Bool())) /** hart reset request to core */ val hartResetReq = cfg.hasHartResets.option(Output(Vec(nComponents, Bool()))) /** authentication support */ val dmAuthenticated = cfg.hasAuthentication.option(Input(Bool())) }) val omRegMap = withReset(reset.asAsyncReset) { // FIXME: Instead of casting reset to ensure it is Async, assert/require reset.Type == AsyncReset (when this feature is available) val dmAuthenticated = io.dmAuthenticated.map( dma => ResetSynchronizerShiftReg(in=dma, sync=3, name=Some("dmAuthenticated_sync"))).getOrElse(true.B) //----DMCONTROL (The whole point of 'Outer' is to maintain this register on dmiClock (e.g. TCK) domain, so that it // can be written even if 'Inner' is not being clocked or is in reset. This allows halting // harts while the rest of the system is in reset. It doesn't really allow any other // register accesses, which will keep returning 'busy' to the debugger interface. 
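    // Next-state register idiom used for DMCONTROL below (and for the other Outer registers): the *Nxt
    // wire defaults to the current register value, the when blocks override individual fields on qualified
    // DMI writes, the register samples *Nxt on every dmiClock edge, and clearing dmactive forces the whole
    // register back to its reset value.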
val DMCONTROLReset = WireInit(0.U.asTypeOf(new DMCONTROLFields())) val DMCONTROLNxt = WireInit(0.U.asTypeOf(new DMCONTROLFields())) val DMCONTROLReg = RegNext(next=DMCONTROLNxt, init=0.U.asTypeOf(DMCONTROLNxt)).suggestName("DMCONTROLReg") val hartsel_mask = if (nComponents > 1) ((1 << p(MaxHartIdBits)) - 1).U else 0.U val DMCONTROLWrData = WireInit(0.U.asTypeOf(new DMCONTROLFields())) val dmactiveWrEn = WireInit(false.B) val ndmresetWrEn = WireInit(false.B) val clrresethaltreqWrEn = WireInit(false.B) val setresethaltreqWrEn = WireInit(false.B) val hartselloWrEn = WireInit(false.B) val haselWrEn = WireInit(false.B) val ackhaveresetWrEn = WireInit(false.B) val hartresetWrEn = WireInit(false.B) val resumereqWrEn = WireInit(false.B) val haltreqWrEn = WireInit(false.B) val dmactive = DMCONTROLReg.dmactive DMCONTROLNxt := DMCONTROLReg when (~dmactive) { DMCONTROLNxt := DMCONTROLReset } .otherwise { when (dmAuthenticated && ndmresetWrEn) { DMCONTROLNxt.ndmreset := DMCONTROLWrData.ndmreset } when (dmAuthenticated && hartselloWrEn) { DMCONTROLNxt.hartsello := DMCONTROLWrData.hartsello & hartsel_mask} when (dmAuthenticated && haselWrEn) { DMCONTROLNxt.hasel := DMCONTROLWrData.hasel } when (dmAuthenticated && hartresetWrEn) { DMCONTROLNxt.hartreset := DMCONTROLWrData.hartreset } when (dmAuthenticated && haltreqWrEn) { DMCONTROLNxt.haltreq := DMCONTROLWrData.haltreq } } // Put this last to override its own effects. when (dmactiveWrEn) { DMCONTROLNxt.dmactive := DMCONTROLWrData.dmactive } //----HARTINFO // DATA registers are mapped to memory. The dataaddr field of HARTINFO has only // 12 bits and assumes the DM base is 0. If not at 0, then HARTINFO reads as 0 // (implying nonexistence according to the Debug Spec). val HARTINFORdData = WireInit(0.U.asTypeOf(new HARTINFOFields())) if (cfg.atzero) when (dmAuthenticated) { HARTINFORdData.dataaccess := true.B HARTINFORdData.datasize := cfg.nAbstractDataWords.U HARTINFORdData.dataaddr := DsbRegAddrs.DATA.U HARTINFORdData.nscratch := cfg.nScratch.U } //-------------------------------------------------------------- // Hart array mask and window // hamask is hart array mask(1 bit per component), which doesn't include the hart selected by dmcontrol.hartsello // HAWINDOWSEL selects a 32-bit slice of HAMASK to be visible for read/write in HAWINDOW //-------------------------------------------------------------- val hamask = WireInit(VecInit(Seq.fill(nComponents) {false.B} )) def haWindowSize = 32 // The following need to be declared even if supportHartArray is false due to reference // at compile time by dmiNode.regmap val HAWINDOWSELWrData = WireInit(0.U.asTypeOf(new HAWINDOWSELFields())) val HAWINDOWSELWrEn = WireInit(false.B) val HAWINDOWRdData = WireInit(0.U.asTypeOf(new HAWINDOWFields())) val HAWINDOWWrData = WireInit(0.U.asTypeOf(new HAWINDOWFields())) val HAWINDOWWrEn = WireInit(false.B) /** whether the hart is selected */ def hartSelected(hart: Int): Bool = { ((io.innerCtrl.bits.hartsel === hart.U) || (if (supportHartArray) io.innerCtrl.bits.hasel && io.innerCtrl.bits.hamask(hart) else false.B)) } val HAWINDOWSELNxt = WireInit(0.U.asTypeOf(new HAWINDOWSELFields())) val HAWINDOWSELReg = RegNext(next=HAWINDOWSELNxt, init=0.U.asTypeOf(HAWINDOWSELNxt)) if (supportHartArray) { val HAWINDOWSELReset = WireInit(0.U.asTypeOf(new HAWINDOWSELFields())) HAWINDOWSELNxt := HAWINDOWSELReg when (~dmactive || ~dmAuthenticated) { HAWINDOWSELNxt := HAWINDOWSELReset } .otherwise { when (HAWINDOWSELWrEn) { // Unneeded upper bits of HAWINDOWSEL are tied to 0. 
Entire register is 0 if all harts fit in one window if (nComponents > haWindowSize) { HAWINDOWSELNxt.hawindowsel := HAWINDOWSELWrData.hawindowsel & ((1 << (log2Up(nComponents) - 5)) - 1).U } else { HAWINDOWSELNxt.hawindowsel := 0.U } } } val numHAMASKSlices = ((nComponents - 1)/haWindowSize)+1 HAWINDOWRdData.maskdata := 0.U // default, overridden below // for each slice,use a hamaskReg to store the selection info for (ii <- 0 until numHAMASKSlices) { val sliceMask = if (nComponents > ((ii*haWindowSize) + haWindowSize-1)) (BigInt(1) << haWindowSize) - 1 // All harts in this slice exist else (BigInt(1)<<(nComponents - (ii*haWindowSize))) - 1 // Partial last slice val HAMASKRst = WireInit(0.U.asTypeOf(new HAWINDOWFields())) val HAMASKNxt = WireInit(0.U.asTypeOf(new HAWINDOWFields())) val HAMASKReg = RegNext(next=HAMASKNxt, init=0.U.asTypeOf(HAMASKNxt)) when (ii.U === HAWINDOWSELReg.hawindowsel) { HAWINDOWRdData.maskdata := HAMASKReg.asUInt & sliceMask.U } HAMASKNxt.maskdata := HAMASKReg.asUInt when (~dmactive || ~dmAuthenticated) { HAMASKNxt := HAMASKRst }.otherwise { when (HAWINDOWWrEn && (ii.U === HAWINDOWSELReg.hawindowsel)) { HAMASKNxt.maskdata := HAWINDOWWrData.maskdata } } // drive each slice of hamask with stored HAMASKReg or with new value being written for (jj <- 0 until haWindowSize) { if (((ii*haWindowSize) + jj) < nComponents) { val tempWrData = HAWINDOWWrData.maskdata.asBools val tempMaskReg = HAMASKReg.asUInt.asBools when (HAWINDOWWrEn && (ii.U === HAWINDOWSELReg.hawindowsel)) { hamask(ii*haWindowSize + jj) := tempWrData(jj) }.otherwise { hamask(ii*haWindowSize + jj) := tempMaskReg(jj) } } } } } //-------------------------------------------------------------- // Halt-on-reset // hrmaskReg is current set of harts that should halt-on-reset // Reset state (dmactive=0) is all zeroes // Bits are set by writing 1 to DMCONTROL.setresethaltreq // Bits are cleared by writing 1 to DMCONTROL.clrresethaltreq // Spec says if both are 1, then clrresethaltreq is executed // hrmask is the halt-on-reset mask which will be sent to inner //-------------------------------------------------------------- val hrmask = Wire(Vec(nComponents, Bool())) val hrmaskNxt = Wire(Vec(nComponents, Bool())) val hrmaskReg = RegNext(next=hrmaskNxt, init=0.U.asTypeOf(hrmaskNxt)).suggestName("hrmaskReg") hrmaskNxt := hrmaskReg for (component <- 0 until nComponents) { when (~dmactive || ~dmAuthenticated) { hrmaskNxt(component) := false.B }.elsewhen (clrresethaltreqWrEn && DMCONTROLWrData.clrresethaltreq && hartSelected(component)) { hrmaskNxt(component) := false.B }.elsewhen (setresethaltreqWrEn && DMCONTROLWrData.setresethaltreq && hartSelected(component)) { hrmaskNxt(component) := true.B } } hrmask := hrmaskNxt val dmControlRegFields = RegFieldGroup("dmcontrol", Some("debug module control register"), Seq( WNotifyVal(1, DMCONTROLReg.dmactive & io.ctrl.dmactiveAck, DMCONTROLWrData.dmactive, dmactiveWrEn, RegFieldDesc("dmactive", "debug module active", reset=Some(0))), WNotifyVal(1, DMCONTROLReg.ndmreset, DMCONTROLWrData.ndmreset, ndmresetWrEn, RegFieldDesc("ndmreset", "debug module reset output", reset=Some(0))), WNotifyVal(1, 0.U, DMCONTROLWrData.clrresethaltreq, clrresethaltreqWrEn, RegFieldDesc("clrresethaltreq", "clear reset halt request", reset=Some(0), access=RegFieldAccessType.W)), WNotifyVal(1, 0.U, DMCONTROLWrData.setresethaltreq, setresethaltreqWrEn, RegFieldDesc("setresethaltreq", "set reset halt request", reset=Some(0), access=RegFieldAccessType.W)), RegField(12), if (nComponents > 1) 
WNotifyVal(p(MaxHartIdBits), DMCONTROLReg.hartsello, DMCONTROLWrData.hartsello, hartselloWrEn, RegFieldDesc("hartsello", "hart select low", reset=Some(0))) else RegField(1), if (nComponents > 1) RegField(10-p(MaxHartIdBits)) else RegField(9), if (supportHartArray) WNotifyVal(1, DMCONTROLReg.hasel, DMCONTROLWrData.hasel, haselWrEn, RegFieldDesc("hasel", "hart array select", reset=Some(0))) else RegField(1), RegField(1), WNotifyVal(1, 0.U, DMCONTROLWrData.ackhavereset, ackhaveresetWrEn, RegFieldDesc("ackhavereset", "acknowledge reset", reset=Some(0), access=RegFieldAccessType.W)), if (cfg.hasHartResets) WNotifyVal(1, DMCONTROLReg.hartreset, DMCONTROLWrData.hartreset, hartresetWrEn, RegFieldDesc("hartreset", "hart reset request", reset=Some(0))) else RegField(1), WNotifyVal(1, 0.U, DMCONTROLWrData.resumereq, resumereqWrEn, RegFieldDesc("resumereq", "resume request", reset=Some(0), access=RegFieldAccessType.W)), WNotifyVal(1, DMCONTROLReg.haltreq, DMCONTROLWrData.haltreq, haltreqWrEn, // Spec says W, but maintaining previous behavior RegFieldDesc("haltreq", "halt request", reset=Some(0))) )) val hartinfoRegFields = RegFieldGroup("dmi_hartinfo", Some("hart information"), Seq( RegField.r(12, HARTINFORdData.dataaddr, RegFieldDesc("dataaddr", "data address", reset=Some(if (cfg.atzero) DsbRegAddrs.DATA else 0))), RegField.r(4, HARTINFORdData.datasize, RegFieldDesc("datasize", "number of DATA registers", reset=Some(if (cfg.atzero) cfg.nAbstractDataWords else 0))), RegField.r(1, HARTINFORdData.dataaccess, RegFieldDesc("dataaccess", "data access type", reset=Some(if (cfg.atzero) 1 else 0))), RegField(3), RegField.r(4, HARTINFORdData.nscratch, RegFieldDesc("nscratch", "number of scratch registers", reset=Some(if (cfg.atzero) cfg.nScratch else 0))) )) //-------------------------------------------------------------- // DMI register decoder for Outer //-------------------------------------------------------------- // regmap addresses are byte offsets from lowest address def DMI_DMCONTROL_OFFSET = 0 def DMI_HARTINFO_OFFSET = ((DMI_HARTINFO - DMI_DMCONTROL) << 2) def DMI_HAWINDOWSEL_OFFSET = ((DMI_HAWINDOWSEL - DMI_DMCONTROL) << 2) def DMI_HAWINDOW_OFFSET = ((DMI_HAWINDOW - DMI_DMCONTROL) << 2) val omRegMap = dmiNode.regmap( DMI_DMCONTROL_OFFSET -> dmControlRegFields, DMI_HARTINFO_OFFSET -> hartinfoRegFields, DMI_HAWINDOWSEL_OFFSET -> (if (supportHartArray && (nComponents > 32)) Seq( WNotifyVal(log2Up(nComponents)-5, HAWINDOWSELReg.hawindowsel, HAWINDOWSELWrData.hawindowsel, HAWINDOWSELWrEn, RegFieldDesc("hawindowsel", "hart array window select", reset=Some(0)))) else Nil), DMI_HAWINDOW_OFFSET -> (if (supportHartArray) Seq( WNotifyVal(if (nComponents > 31) 32 else nComponents, HAWINDOWRdData.maskdata, HAWINDOWWrData.maskdata, HAWINDOWWrEn, RegFieldDesc("hawindow", "hart array window", reset=Some(0), volatile=(nComponents > 32)))) else Nil) ) //-------------------------------------------------------------- // Interrupt Registers //-------------------------------------------------------------- val debugIntNxt = WireInit(VecInit(Seq.fill(nComponents) {false.B} )) val debugIntRegs = RegNext(next=debugIntNxt, init=0.U.asTypeOf(debugIntNxt)).suggestName("debugIntRegs") debugIntNxt := debugIntRegs val (intnode_out, _) = intnode.out.unzip for (component <- 0 until nComponents) { intnode_out(component)(0) := debugIntRegs(component) | io.hgDebugInt(component) } // sends debug interruption to Core when dmcs.haltreq is set, for (component <- 0 until nComponents) { when (~dmactive || ~dmAuthenticated) { 
debugIntNxt(component) := false.B }. otherwise { when (haltreqWrEn && ((DMCONTROLWrData.hartsello === component.U) || (if (supportHartArray) DMCONTROLWrData.hasel && hamask(component) else false.B))) { debugIntNxt(component) := DMCONTROLWrData.haltreq } } } // Halt request registers are set & cleared by writes to DMCONTROL.haltreq // resumereq also causes the core to execute a 'dret', // so resumereq is passed through to Inner. // hartsel/hasel/hamask must also be used by the DebugModule state machine, // so it is passed to Inner. // These registers ensure that requests to dmInner are not lost if inner clock isn't running or requests occur too close together. // If the innerCtrl async queue is not ready, the notification will be posted and held until ready is received. // Additional notifications that occur while one is already waiting update the pending data so that the last value written is sent. // Volatile events resumereq and ackhavereset are registered when they occur and remain pending until ready is received. val innerCtrlValid = Wire(Bool()) val innerCtrlValidReg = RegInit(false.B).suggestName("innerCtrlValidReg") val innerCtrlResumeReqReg = RegInit(false.B).suggestName("innerCtrlResumeReqReg") val innerCtrlAckHaveResetReg = RegInit(false.B).suggestName("innerCtrlAckHaveResetReg") innerCtrlValid := hartselloWrEn | resumereqWrEn | ackhaveresetWrEn | setresethaltreqWrEn | clrresethaltreqWrEn | haselWrEn | (HAWINDOWWrEn & supportHartArray.B) innerCtrlValidReg := io.innerCtrl.valid & ~io.innerCtrl.ready // Hold innerctrl request until the async queue accepts it innerCtrlResumeReqReg := io.innerCtrl.bits.resumereq & ~io.innerCtrl.ready // Hold resumereq until accepted innerCtrlAckHaveResetReg := io.innerCtrl.bits.ackhavereset & ~io.innerCtrl.ready // Hold ackhavereset until accepted io.innerCtrl.valid := innerCtrlValid | innerCtrlValidReg io.innerCtrl.bits.hartsel := Mux(hartselloWrEn, DMCONTROLWrData.hartsello, DMCONTROLReg.hartsello) io.innerCtrl.bits.resumereq := (resumereqWrEn & DMCONTROLWrData.resumereq) | innerCtrlResumeReqReg io.innerCtrl.bits.ackhavereset := (ackhaveresetWrEn & DMCONTROLWrData.ackhavereset) | innerCtrlAckHaveResetReg io.innerCtrl.bits.hrmask := hrmask if (supportHartArray) { io.innerCtrl.bits.hasel := Mux(haselWrEn, DMCONTROLWrData.hasel, DMCONTROLReg.hasel) io.innerCtrl.bits.hamask := hamask } else { io.innerCtrl.bits.hasel := DontCare io.innerCtrl.bits.hamask := DontCare } io.ctrl.ndreset := DMCONTROLReg.ndmreset io.ctrl.dmactive := DMCONTROLReg.dmactive // hart reset mechanism implementation if (cfg.hasHartResets) { val hartResetNxt = Wire(Vec(nComponents, Bool())) val hartResetReg = RegNext(next=hartResetNxt, init=0.U.asTypeOf(hartResetNxt)) for (component <- 0 until nComponents) { hartResetNxt(component) := DMCONTROLReg.hartreset & hartSelected(component) io.hartResetReq.get(component) := hartResetReg(component) } } omRegMap // FIXME: Remove this when withReset is removed }} } // wrap a Outer with a DMIToTL, derived by dmi clock & reset class TLDebugModuleOuterAsync(device: Device)(implicit p: Parameters) extends LazyModule { val cfg = p(DebugModuleKey).get val dmiXbar = LazyModule (new TLXbar(nameSuffix = Some("dmixbar"))) val dmi2tlOpt = (!p(ExportDebug).apb).option({ val dmi2tl = LazyModule(new DMIToTL()) dmiXbar.node := dmi2tl.node dmi2tl }) val apbNodeOpt = p(ExportDebug).apb.option({ val apb2tl = LazyModule(new APBToTL()) val apb2tlBuffer = LazyModule(new TLBuffer(BufferParams.pipe)) val dmTopAddr = (1 << cfg.nDMIAddrSize) << 2 val tlErrorParams = 
DevNullParams(AddressSet.misaligned(dmTopAddr, APBDebugConsts.apbDebugRegBase-dmTopAddr), maxAtomic=0, maxTransfer=4) val tlError = LazyModule(new TLError(tlErrorParams, buffer=false)) val apbXbar = LazyModule(new APBFanout()) val apbRegs = LazyModule(new APBDebugRegisters()) apbRegs.node := apbXbar.node apb2tl.node := apbXbar.node apb2tlBuffer.node := apb2tl.node dmiXbar.node := apb2tlBuffer.node tlError.node := dmiXbar.node apbXbar.node }) val dmOuter = LazyModule( new TLDebugModuleOuter(device)) val intnode = IntSyncIdentityNode() intnode :*= IntSyncCrossingSource(alreadyRegistered = true) :*= dmOuter.intnode val dmiBypass = LazyModule(new TLBusBypass(beatBytes=4, bufferError=false, maxAtomic=0, maxTransfer=4)) val dmiInnerNode = TLAsyncCrossingSource() := dmiBypass.node := dmiXbar.node dmOuter.dmiNode := dmiXbar.node lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val nComponents = dmOuter.intnode.edges.out.size val io = IO(new Bundle { val dmi_clock = Input(Clock()) val dmi_reset = Input(Reset()) /** Debug Module Interface bewteen DM and DTM * * The DTM provides access to one or more Debug Modules (DMs) using DMI */ val dmi = (!p(ExportDebug).apb).option(Flipped(new DMIIO()(p))) // Optional APB Interface is fully diplomatic so is not listed here. val ctrl = new DebugCtrlBundle(nComponents) /** conrol signals for Inner, generated in Outer */ val innerCtrl = new AsyncBundle(new DebugInternalBundle(nComponents), AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset)) /** debug interruption generated in Inner */ val hgDebugInt = Input(Vec(nComponents, Bool())) /** hart reset request to core */ val hartResetReq = p(DebugModuleKey).get.hasHartResets.option(Output(Vec(nComponents, Bool()))) /** Authentication signal from core */ val dmAuthenticated = p(DebugModuleKey).get.hasAuthentication.option(Input(Bool())) }) val rf_reset = IO(Input(Reset())) // RF transform childClock := io.dmi_clock childReset := io.dmi_reset override def provideImplicitClockToLazyChildren = true withClockAndReset(childClock, childReset) { dmi2tlOpt.foreach { _.module.io.dmi <> io.dmi.get } val dmactiveAck = AsyncResetSynchronizerShiftReg(in=io.ctrl.dmactiveAck, sync=3, name=Some("dmactiveAckSync")) dmiBypass.module.io.bypass := ~io.ctrl.dmactive | ~dmactiveAck io.ctrl <> dmOuter.module.io.ctrl dmOuter.module.io.ctrl.dmactiveAck := dmactiveAck // send synced version down to dmOuter io.innerCtrl <> ToAsyncBundle(dmOuter.module.io.innerCtrl, AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset)) dmOuter.module.io.hgDebugInt := io.hgDebugInt io.hartResetReq.foreach { x => dmOuter.module.io.hartResetReq.foreach {y => x := y}} io.dmAuthenticated.foreach { x => dmOuter.module.io.dmAuthenticated.foreach { y => y := x}} } } } class TLDebugModuleInner(device: Device, getNComponents: () => Int, beatBytes: Int)(implicit p: Parameters) extends LazyModule { // For Shorter Register Names import DMI_RegAddrs._ val cfg = p(DebugModuleKey).get def getCfg = () => cfg val dmTopAddr = (1 << cfg.nDMIAddrSize) << 2 /** dmiNode address set */ val dmiNode = TLRegisterNode( // Address is range 0 to 0x1FF except DMCONTROL, HARTINFO, HAWINDOWSEL, HAWINDOW which are handled by Outer address = AddressSet.misaligned(0, DMI_DMCONTROL << 2) ++ AddressSet.misaligned((DMI_DMCONTROL + 1) << 2, ((DMI_HARTINFO << 2) - ((DMI_DMCONTROL + 1) << 2))) ++ AddressSet.misaligned((DMI_HARTINFO + 1) << 2, ((DMI_HAWINDOWSEL << 2) - ((DMI_HARTINFO + 1) << 2))) ++ AddressSet.misaligned((DMI_HAWINDOW + 1) << 2, (dmTopAddr - ((DMI_HAWINDOW 
+ 1) << 2))), device = device, beatBytes = 4, executable = false ) val tlNode = TLRegisterNode( address=Seq(cfg.address), device=device, beatBytes=beatBytes, executable=true ) val sb2tlOpt = cfg.hasBusMaster.option(LazyModule(new SBToTL())) // If we want to support custom registers read through Abstract Commands, // provide a place to bring them into the debug module. What this connects // to is up to the implementation. val customNode = new DebugCustomSink() lazy val module = new Impl class Impl extends LazyModuleImp(this){ val nComponents = getNComponents() Annotated.params(this, cfg) val supportHartArray = cfg.supportHartArray & (nComponents > 1) val nExtTriggers = cfg.nExtTriggers val nHaltGroups = if ((nComponents > 1) | (nExtTriggers > 0)) cfg.nHaltGroups else 0 // no halt groups possible if single hart with no external triggers val hartSelFuncs = if (getNComponents() > 1) p(DebugModuleHartSelKey) else DebugModuleHartSelFuncs( hartIdToHartSel = (x) => 0.U, hartSelToHartId = (x) => x ) val io = IO(new Bundle { /** dm reset signal passed in from Outer */ val dmactive = Input(Bool()) /** conrol signals for Inner * * it's generated by Outer and comes in */ val innerCtrl = Flipped(new DecoupledIO(new DebugInternalBundle(nComponents))) /** debug unavail signal passed in from Outer*/ val debugUnavail = Input(Vec(nComponents, Bool())) /** debug interruption from Inner to Outer * * contain 2 type of debug interruption causes: * - halt group * - halt-on-reset */ val hgDebugInt = Output(Vec(nComponents, Bool())) /** interface for trigger */ val extTrigger = (nExtTriggers > 0).option(new DebugExtTriggerIO()) /** vector to indicate which hart is in reset * * dm receives it from core and sends it to Inner */ val hartIsInReset = Input(Vec(nComponents, Bool())) val tl_clock = Input(Clock()) val tl_reset = Input(Reset()) /** Debug Authentication signals from core */ val auth = cfg.hasAuthentication.option(new DebugAuthenticationIO()) }) sb2tlOpt.map { sb => sb.module.clock := io.tl_clock sb.module.reset := io.tl_reset sb.module.rf_reset := io.tl_reset } //-------------------------------------------------------------- // Import constants for shorter variable names //-------------------------------------------------------------- import DMI_RegAddrs._ import DsbRegAddrs._ import DsbBusConsts._ //-------------------------------------------------------------- // Sanity Check Configuration For this implementation. 
    //--------------------------------------------------------------

    require (cfg.supportQuickAccess == false, "No Quick Access support yet")
    require ((nHaltGroups > 0) || (nExtTriggers == 0), "External triggers require at least 1 halt group")

    //--------------------------------------------------------------
    // Register & Wire Declarations (which need to be pre-declared)
    //--------------------------------------------------------------

    // run control regs: tracking all the harts
    // implements: see implementation-specific bits part
    /** all harts halted status */
    val haltedBitRegs = Reg(UInt(nComponents.W))
    /** all harts resume request status */
    val resumeReqRegs = Reg(UInt(nComponents.W))
    /** all harts have reset status */
    val haveResetBitRegs = Reg(UInt(nComponents.W))
    // default is 1; after resume, resumeAcks get 0
    /** all harts resume ack status */
    val resumeAcks = Wire(UInt(nComponents.W))

    // --- regmapper outputs

    // hart state Id and En
    // in Hart Bus Access ROM
    val hartHaltedWrEn = Wire(Bool())
    val hartHaltedId = Wire(UInt(sbIdWidth.W))
    val hartGoingWrEn = Wire(Bool())
    val hartGoingId = Wire(UInt(sbIdWidth.W))
    val hartResumingWrEn = Wire(Bool())
    val hartResumingId = Wire(UInt(sbIdWidth.W))
    val hartExceptionWrEn = Wire(Bool())
    val hartExceptionId = Wire(UInt(sbIdWidth.W))

    // progbuf and abstract data: byte-addressable control logic
    // AccessLegal is set only when state = waiting
    // RdEn and WrEnMaybe: control signals driven by the DMI bus
    val dmiProgramBufferRdEn = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
    val dmiProgramBufferAccessLegal = WireInit(false.B)
    val dmiProgramBufferWrEnMaybe = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
    val dmiAbstractDataRdEn = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
    val dmiAbstractDataAccessLegal = WireInit(false.B)
    val dmiAbstractDataWrEnMaybe = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))

    //--------------------------------------------------------------
    // Registers coming from 'CONTROL' in Outer
    //--------------------------------------------------------------

    val dmAuthenticated = io.auth.map(a => a.dmAuthenticated).getOrElse(true.B)
    val selectedHartReg = Reg(UInt(p(MaxHartIdBits).W))
    // hamaskFull is a vector of all selected harts including hartsel, whether or not supportHartArray is true
    val hamaskFull = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))

    if (nComponents > 1) {
      when (~io.dmactive) {
        selectedHartReg := 0.U
      }.elsewhen (io.innerCtrl.fire){
        selectedHartReg := io.innerCtrl.bits.hartsel
      }
    }

    if (supportHartArray) {
      val hamaskZero = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
      val hamaskReg = Reg(Vec(nComponents, Bool()))
      when (~io.dmactive || ~dmAuthenticated) {
        hamaskReg := hamaskZero
      }.elsewhen (io.innerCtrl.fire){
        hamaskReg := Mux(io.innerCtrl.bits.hasel, io.innerCtrl.bits.hamask, hamaskZero)
      }
      hamaskFull := hamaskReg
    }

    // Outer.hamask doesn't consider the hart selected by dmcontrol.hartsello,
    // so append it here
    when (selectedHartReg < nComponents.U) {
      hamaskFull(if (nComponents == 1) 0.U(0.W) else selectedHartReg) := true.B
    }

    io.innerCtrl.ready := true.B

    // Construct a Vec from io.innerCtrl fields indicating whether each hart is being selected in this write
    // A hart may be selected by hartsel field or by hart array
    val hamaskWrSel = WireInit(VecInit(Seq.fill(nComponents) {false.B} ))
    for (component <- 0 until nComponents ) {
      hamaskWrSel(component) := ((io.innerCtrl.bits.hartsel === component.U) || (if (supportHartArray)
io.innerCtrl.bits.hasel && io.innerCtrl.bits.hamask(component) else false.B)) } //------------------------------------- // Halt-on-reset logic // hrmask is set in dmOuter and passed in // Debug interrupt is generated when a reset occurs whose corresponding hrmask bit is set // Debug interrupt is maintained until the hart enters halted state //------------------------------------- val hrReset = WireInit(VecInit(Seq.fill(nComponents) { false.B } )) val hrDebugInt = Wire(Vec(nComponents, Bool())) val hrmaskReg = RegInit(hrReset) val hartIsInResetSync = Wire(Vec(nComponents, Bool())) for (component <- 0 until nComponents) { hartIsInResetSync(component) := AsyncResetSynchronizerShiftReg(io.hartIsInReset(component), 3, Some(s"debug_hartReset_$component")) } when (~io.dmactive || ~dmAuthenticated) { hrmaskReg := hrReset }.elsewhen (io.innerCtrl.fire){ hrmaskReg := io.innerCtrl.bits.hrmask } withReset(reset.asAsyncReset) { // ensure interrupt requests are negated at first clock edge val hrDebugIntReg = RegInit(VecInit(Seq.fill(nComponents) { false.B } )) when (~io.dmactive || ~dmAuthenticated) { hrDebugIntReg := hrReset }.otherwise { hrDebugIntReg := hrmaskReg & (hartIsInResetSync | // set debugInt during reset (hrDebugIntReg & ~(haltedBitRegs.asBools))) // maintain until core halts } hrDebugInt := hrDebugIntReg } //-------------------------------------------------------------- // DMI Registers //-------------------------------------------------------------- //----DMSTATUS val DMSTATUSRdData = WireInit(0.U.asTypeOf(new DMSTATUSFields())) DMSTATUSRdData.authenticated := dmAuthenticated DMSTATUSRdData.version := 2.U // Version 0.13 io.auth.map(a => DMSTATUSRdData.authbusy := a.dmAuthBusy) val resumereq = io.innerCtrl.fire && io.innerCtrl.bits.resumereq when (dmAuthenticated) { DMSTATUSRdData.hasresethaltreq := true.B DMSTATUSRdData.anynonexistent := (selectedHartReg >= nComponents.U) // only hartsel can be nonexistent // all harts nonexistent if hartsel is out of range and there are no harts selected in the hart array DMSTATUSRdData.allnonexistent := (selectedHartReg >= nComponents.U) & (~hamaskFull.reduce(_ | _)) when (~DMSTATUSRdData.allnonexistent) { // if no existent harts selected, all other status is false DMSTATUSRdData.anyunavail := (io.debugUnavail & hamaskFull).reduce(_ | _) DMSTATUSRdData.anyhalted := ((~io.debugUnavail & (haltedBitRegs.asBools)) & hamaskFull).reduce(_ | _) DMSTATUSRdData.anyrunning := ((~io.debugUnavail & ~(haltedBitRegs.asBools)) & hamaskFull).reduce(_ | _) DMSTATUSRdData.anyhavereset := (haveResetBitRegs.asBools & hamaskFull).reduce(_ | _) DMSTATUSRdData.anyresumeack := (resumeAcks.asBools & hamaskFull).reduce(_ | _) when (~DMSTATUSRdData.anynonexistent) { // if one hart is nonexistent, no 'all' status is set DMSTATUSRdData.allunavail := (io.debugUnavail | ~hamaskFull).reduce(_ & _) DMSTATUSRdData.allhalted := ((~io.debugUnavail & (haltedBitRegs.asBools)) | ~hamaskFull).reduce(_ & _) DMSTATUSRdData.allrunning := ((~io.debugUnavail & ~(haltedBitRegs.asBools)) | ~hamaskFull).reduce(_ & _) DMSTATUSRdData.allhavereset := (haveResetBitRegs.asBools | ~hamaskFull).reduce(_ & _) DMSTATUSRdData.allresumeack := (resumeAcks.asBools | ~hamaskFull).reduce(_ & _) } } //TODO DMSTATUSRdData.confstrptrvalid := false.B DMSTATUSRdData.impebreak := (cfg.hasImplicitEbreak).B } when(~io.dmactive || ~dmAuthenticated) { haveResetBitRegs := 0.U }.otherwise { when (io.innerCtrl.fire && io.innerCtrl.bits.ackhavereset) { haveResetBitRegs := (haveResetBitRegs & (~(hamaskWrSel.asUInt))) | 
hartIsInResetSync.asUInt }.otherwise { haveResetBitRegs := haveResetBitRegs | hartIsInResetSync.asUInt } } //----DMCS2 (Halt Groups) val DMCS2RdData = WireInit(0.U.asTypeOf(new DMCS2Fields())) val DMCS2WrData = WireInit(0.U.asTypeOf(new DMCS2Fields())) val hgselectWrEn = WireInit(false.B) val hgwriteWrEn = WireInit(false.B) val haltgroupWrEn = WireInit(false.B) val exttriggerWrEn = WireInit(false.B) val hgDebugInt = WireInit(VecInit(Seq.fill(nComponents) {false.B} )) if (nHaltGroups > 0) withReset (reset.asAsyncReset) { // async reset ensures triggers don't falsely fire during startup val hgBits = log2Up(nHaltGroups) // hgParticipate: Each entry indicates which hg that entity belongs to (1 to nHartGroups). 0 means no hg assigned. val hgParticipateHart = RegInit(VecInit(Seq.fill(nComponents)(0.U(hgBits.W)))) val hgParticipateTrig = if (nExtTriggers > 0) RegInit(VecInit(Seq.fill(nExtTriggers)(0.U(hgBits.W)))) else Nil // assign group index to current seledcted harts for (component <- 0 until nComponents) { when (~io.dmactive || ~dmAuthenticated) { hgParticipateHart(component) := 0.U }.otherwise { when (haltgroupWrEn & DMCS2WrData.hgwrite & ~DMCS2WrData.hgselect & hamaskFull(component) & (DMCS2WrData.haltgroup <= nHaltGroups.U)) { hgParticipateHart(component) := DMCS2WrData.haltgroup } } } DMCS2RdData.haltgroup := hgParticipateHart(if (nComponents == 1) 0.U(0.W) else selectedHartReg) if (nExtTriggers > 0) { val hgSelect = Reg(Bool()) when (~io.dmactive || ~dmAuthenticated) { hgSelect := false.B }.otherwise { when (hgselectWrEn) { hgSelect := DMCS2WrData.hgselect } } // assign group index to trigger for (trigger <- 0 until nExtTriggers) { when (~io.dmactive || ~dmAuthenticated) { hgParticipateTrig(trigger) := 0.U }.otherwise { when (haltgroupWrEn & DMCS2WrData.hgwrite & DMCS2WrData.hgselect & (DMCS2WrData.exttrigger === trigger.U) & (DMCS2WrData.haltgroup <= nHaltGroups.U)) { hgParticipateTrig(trigger) := DMCS2WrData.haltgroup } } } DMCS2RdData.hgselect := hgSelect when (hgSelect) { DMCS2RdData.haltgroup := hgParticipateTrig(0) } // If there is only 1 ext trigger, then the exttrigger field is fixed at 0 // Otherwise, instantiate a register with only the number of bits required if (nExtTriggers > 1) { val trigBits = log2Up(nExtTriggers-1) val hgExtTrigger = Reg(UInt(trigBits.W)) when (~io.dmactive || ~dmAuthenticated) { hgExtTrigger := 0.U }.otherwise { when (exttriggerWrEn & (DMCS2WrData.exttrigger < nExtTriggers.U)) { hgExtTrigger := DMCS2WrData.exttrigger } } DMCS2RdData.exttrigger := hgExtTrigger when (hgSelect) { DMCS2RdData.haltgroup := hgParticipateTrig(hgExtTrigger) } } } // Halt group state machine // IDLE: Go to FIRED when any hart in this hg writes to HALTED while its HaltedBitRegs=0 // or when any trigin assigned to this hg occurs // FIRED: Back to IDLE when all harts in this hg have set their haltedBitRegs // and all trig out in this hg have been acknowledged val hgFired = RegInit (VecInit(Seq.fill(nHaltGroups+1) {false.B} )) val hgHartFiring = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // which hg's are firing due to hart halting val hgTrigFiring = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // which hg's are firing due to trig in val hgHartsAllHalted = WireInit(VecInit(Seq.fill(nHaltGroups+1) {false.B} )) // in which hg's have all harts halted val hgTrigsAllAcked = WireInit(VecInit(Seq.fill(nHaltGroups+1) { true.B} )) // in which hg's have all trigouts been acked io.extTrigger.foreach {extTrigger => val extTriggerInReq = Wire(Vec(nExtTriggers, Bool())) val 
extTriggerOutAck = Wire(Vec(nExtTriggers, Bool())) extTriggerInReq := extTrigger.in.req.asBools extTriggerOutAck := extTrigger.out.ack.asBools val trigInReq = ResetSynchronizerShiftReg(in=extTriggerInReq, sync=3, name=Some("dm_extTriggerInReqSync")) val trigOutAck = ResetSynchronizerShiftReg(in=extTriggerOutAck, sync=3, name=Some("dm_extTriggerOutAckSync")) for (hg <- 1 to nHaltGroups) { hgTrigFiring(hg) := (trigInReq & ~RegNext(trigInReq) & hgParticipateTrig.map(_ === hg.U)).reduce(_ | _) hgTrigsAllAcked(hg) := (trigOutAck | hgParticipateTrig.map(_ =/= hg.U)).reduce(_ & _) } extTrigger.in.ack := trigInReq.asUInt } for (hg <- 1 to nHaltGroups) { hgHartFiring(hg) := hartHaltedWrEn & ~haltedBitRegs(hartHaltedId) & (hgParticipateHart(hartSelFuncs.hartIdToHartSel(hartHaltedId)) === hg.U) hgHartsAllHalted(hg) := (haltedBitRegs.asBools | hgParticipateHart.map(_ =/= hg.U)).reduce(_ & _) when (~io.dmactive || ~dmAuthenticated) { hgFired(hg) := false.B }.elsewhen (~hgFired(hg) & (hgHartFiring(hg) | hgTrigFiring(hg))) { hgFired(hg) := true.B }.elsewhen ( hgFired(hg) & hgHartsAllHalted(hg) & hgTrigsAllAcked(hg)) { hgFired(hg) := false.B } } // For each hg that has fired, assert debug interrupt to each hart in that hg for (component <- 0 until nComponents) { hgDebugInt(component) := hgFired(hgParticipateHart(component)) } // For each hg that has fired, assert trigger out for all external triggers in that hg io.extTrigger.foreach {extTrigger => val extTriggerOutReq = RegInit(VecInit(Seq.fill(cfg.nExtTriggers) {false.B} )) for (trig <- 0 until nExtTriggers) { extTriggerOutReq(trig) := hgFired(hgParticipateTrig(trig)) } extTrigger.out.req := extTriggerOutReq.asUInt } } io.hgDebugInt := hgDebugInt | hrDebugInt //----HALTSUM* val numHaltedStatus = ((nComponents - 1) / 32) + 1 val haltedStatus = Wire(Vec(numHaltedStatus, Bits(32.W))) for (ii <- 0 until numHaltedStatus) { when (dmAuthenticated) { haltedStatus(ii) := haltedBitRegs >> (ii*32) }.otherwise { haltedStatus(ii) := 0.U } } val haltedSummary = Cat(haltedStatus.map(_.orR).reverse) val HALTSUM1RdData = haltedSummary.asTypeOf(new HALTSUM1Fields()) val selectedHaltedStatus = Mux((selectedHartReg >> 5) > numHaltedStatus.U, 0.U, haltedStatus(selectedHartReg >> 5)) val HALTSUM0RdData = selectedHaltedStatus.asTypeOf(new HALTSUM0Fields()) // Since we only support 1024 harts, we don't implement HALTSUM2 or HALTSUM3 //----ABSTRACTCS val ABSTRACTCSReset = WireInit(0.U.asTypeOf(new ABSTRACTCSFields())) ABSTRACTCSReset.datacount := cfg.nAbstractDataWords.U ABSTRACTCSReset.progbufsize := cfg.nProgramBufferWords.U val ABSTRACTCSReg = Reg(new ABSTRACTCSFields()) val ABSTRACTCSWrData = WireInit(0.U.asTypeOf(new ABSTRACTCSFields())) val ABSTRACTCSRdData = WireInit(ABSTRACTCSReg) val ABSTRACTCSRdEn = WireInit(false.B) val ABSTRACTCSWrEnMaybe = WireInit(false.B) val ABSTRACTCSWrEnLegal = WireInit(false.B) val ABSTRACTCSWrEn = ABSTRACTCSWrEnMaybe && ABSTRACTCSWrEnLegal // multiple error types // find implement in the state machine part val errorBusy = WireInit(false.B) val errorException = WireInit(false.B) val errorUnsupported = WireInit(false.B) val errorHaltResume = WireInit(false.B) when (~io.dmactive || ~dmAuthenticated) { ABSTRACTCSReg := ABSTRACTCSReset }.otherwise { when (errorBusy){ ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrBusy.id.U }.elsewhen (errorException) { ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrException.id.U }.elsewhen (errorUnsupported) { ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrNotSupported.id.U }.elsewhen 
      (errorHaltResume) {
        ABSTRACTCSReg.cmderr := DebugAbstractCommandError.ErrHaltResume.id.U
      }.otherwise {
        //W1C
        when (ABSTRACTCSWrEn){
          ABSTRACTCSReg.cmderr := ABSTRACTCSReg.cmderr & ~(ABSTRACTCSWrData.cmderr);
        }
      }
    }

    // For busy, see below state machine.
    val abstractCommandBusy = WireInit(true.B)
    ABSTRACTCSRdData.busy := abstractCommandBusy

    when (~dmAuthenticated) { // read value must be 0 when not authenticated
      ABSTRACTCSRdData.datacount := 0.U
      ABSTRACTCSRdData.progbufsize := 0.U
    }

    //---- ABSTRACTAUTO

    // It is a mask indicating whether datai/progbufi have the autoexecution permission
    // this part aims to produce 3 wires: autoexecData, autoexecProg, autoexec
    // first two specify which reg supports autoexec
    // autoexec is a control signal, meaning there is at least one enabled autoexec reg
    // when autoexec is set, generate instructions using COMMAND register
    val ABSTRACTAUTOReset = WireInit(0.U.asTypeOf(new ABSTRACTAUTOFields()))
    val ABSTRACTAUTOReg = Reg(new ABSTRACTAUTOFields())
    val ABSTRACTAUTOWrData = WireInit(0.U.asTypeOf(new ABSTRACTAUTOFields()))
    val ABSTRACTAUTORdData = WireInit(ABSTRACTAUTOReg)
    val ABSTRACTAUTORdEn = WireInit(false.B)
    val autoexecdataWrEnMaybe = WireInit(false.B)
    val autoexecprogbufWrEnMaybe = WireInit(false.B)
    val ABSTRACTAUTOWrEnLegal = WireInit(false.B)

    when (~io.dmactive || ~dmAuthenticated) {
      ABSTRACTAUTOReg := ABSTRACTAUTOReset
    }.otherwise {
      when (autoexecprogbufWrEnMaybe && ABSTRACTAUTOWrEnLegal) {
        ABSTRACTAUTOReg.autoexecprogbuf := ABSTRACTAUTOWrData.autoexecprogbuf & ( (1 << cfg.nProgramBufferWords) - 1).U
      }
      when (autoexecdataWrEnMaybe && ABSTRACTAUTOWrEnLegal) {
        ABSTRACTAUTOReg.autoexecdata := ABSTRACTAUTOWrData.autoexecdata & ( (1 << cfg.nAbstractDataWords) - 1).U
      }
    }

    // Abstract Data access vector (byte-addressable)
    val dmiAbstractDataAccessVec = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords * 4) {false.B} ))
    dmiAbstractDataAccessVec := (dmiAbstractDataWrEnMaybe zip dmiAbstractDataRdEn).map{ case (r,w) => r | w}

    // Program Buffer access vector (byte-addressable)
    val dmiProgramBufferAccessVec = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords * 4) {false.B} ))
    dmiProgramBufferAccessVec := (dmiProgramBufferWrEnMaybe zip dmiProgramBufferRdEn).map{ case (r,w) => r | w}

    // at least one word access
    val dmiAbstractDataAccess = dmiAbstractDataAccessVec.reduce(_ || _ )
    val dmiProgramBufferAccess = dmiProgramBufferAccessVec.reduce(_ || _)

    // This will take the shorter of the lists, which is what we want.
    val autoexecData = WireInit(VecInit(Seq.fill(cfg.nAbstractDataWords) {false.B} ))
    val autoexecProg = WireInit(VecInit(Seq.fill(cfg.nProgramBufferWords) {false.B} ))
    (autoexecData zip ABSTRACTAUTOReg.autoexecdata.asBools).zipWithIndex.foreach {case (t, i) => t._1 := dmiAbstractDataAccessVec(i * 4) && t._2 }
    (autoexecProg zip ABSTRACTAUTOReg.autoexecprogbuf.asBools).zipWithIndex.foreach {case (t, i) => t._1 := dmiProgramBufferAccessVec(i * 4) && t._2}

    val autoexec = autoexecData.reduce(_ || _) || autoexecProg.reduce(_ || _)

    //---- COMMAND

    val COMMANDReset = WireInit(0.U.asTypeOf(new COMMANDFields()))
    val COMMANDReg = Reg(new COMMANDFields())

    val COMMANDWrDataVal = WireInit(0.U(32.W))
    val COMMANDWrData = WireInit(COMMANDWrDataVal.asTypeOf(new COMMANDFields()))
    val COMMANDWrEnMaybe = WireInit(false.B)
    val COMMANDWrEnLegal = WireInit(false.B)
    val COMMANDRdEn = WireInit(false.B)

    val COMMANDWrEn = COMMANDWrEnMaybe && COMMANDWrEnLegal
    val COMMANDRdData = COMMANDReg

    when (~io.dmactive || ~dmAuthenticated) {
      COMMANDReg := COMMANDReset
    }.otherwise {
      when (COMMANDWrEn) {
        COMMANDReg := COMMANDWrData
      }
    }

    // --- Abstract Data

    // These are byte addressable, s.t. the Processor can use
    // byte-addressable instructions to store to them.
    val abstractDataMem = Reg(Vec(cfg.nAbstractDataWords*4, UInt(8.W)))
    val abstractDataNxt = WireInit(abstractDataMem)

    // --- Program Buffer

    // byte-addressable mem
    val programBufferMem = Reg(Vec(cfg.nProgramBufferWords*4, UInt(8.W)))
    val programBufferNxt = WireInit(programBufferMem)

    //--------------------------------------------------------------
    // These bits are implementation-specific bits set
    // by harts executing code.
    //--------------------------------------------------------------

    // Run control logic
    when (~io.dmactive || ~dmAuthenticated) {
      haltedBitRegs := 0.U
      resumeReqRegs := 0.U
    }.otherwise {
      // remove those harts in reset
      resumeReqRegs := resumeReqRegs & ~(hartIsInResetSync.asUInt)

      val hartHaltedIdIndex = UIntToOH(hartSelFuncs.hartIdToHartSel(hartHaltedId))
      val hartResumingIdIndex = UIntToOH(hartSelFuncs.hartIdToHartSel(hartResumingId))
      val hartselIndex = UIntToOH(io.innerCtrl.bits.hartsel)

      when (hartHaltedWrEn) {
        // add those harts halting and remove those in reset
        haltedBitRegs := (haltedBitRegs | hartHaltedIdIndex) & ~(hartIsInResetSync.asUInt)
      }.elsewhen (hartResumingWrEn) {
        // remove those harts in reset and those in resume
        haltedBitRegs := (haltedBitRegs & ~(hartResumingIdIndex)) & ~(hartIsInResetSync.asUInt)
      }.otherwise {
        // remove those harts in reset
        haltedBitRegs := haltedBitRegs & ~(hartIsInResetSync.asUInt)
      }

      when (hartResumingWrEn) {
        // remove those harts in resume and those in reset
        resumeReqRegs := (resumeReqRegs & ~(hartResumingIdIndex)) & ~(hartIsInResetSync.asUInt)
      }
      when (resumereq) {
        // set all selected harts to resumeReq, remove those in reset
        resumeReqRegs := (resumeReqRegs | hamaskWrSel.asUInt) & ~(hartIsInResetSync.asUInt)
      }
    }

    when (resumereq) {
      // next cycle resumeAcks will be the negation of next cycle resumeReqRegs
      resumeAcks := (~resumeReqRegs & ~(hamaskWrSel.asUInt))
    }.otherwise {
      resumeAcks := ~resumeReqRegs
    }

    //---- AUTHDATA
    val authRdEnMaybe = WireInit(false.B)
    val authWrEnMaybe = WireInit(false.B)
    io.auth.map { a =>
      a.dmactive := io.dmactive
      a.dmAuthRead := authRdEnMaybe & ~a.dmAuthBusy
      a.dmAuthWrite := authWrEnMaybe & ~a.dmAuthBusy
    }

    val dmstatusRegFields = RegFieldGroup("dmi_dmstatus", Some("debug module status register"), Seq(
      RegField.r(4, DMSTATUSRdData.version, RegFieldDesc("version", "version", reset=Some(2))),
      RegField.r(1,
DMSTATUSRdData.confstrptrvalid, RegFieldDesc("confstrptrvalid", "confstrptrvalid", reset=Some(0))), RegField.r(1, DMSTATUSRdData.hasresethaltreq, RegFieldDesc("hasresethaltreq", "hasresethaltreq", reset=Some(1))), RegField.r(1, DMSTATUSRdData.authbusy, RegFieldDesc("authbusy", "authbusy", reset=Some(0))), RegField.r(1, DMSTATUSRdData.authenticated, RegFieldDesc("authenticated", "authenticated", reset=Some(1))), RegField.r(1, DMSTATUSRdData.anyhalted, RegFieldDesc("anyhalted", "anyhalted", reset=Some(0))), RegField.r(1, DMSTATUSRdData.allhalted, RegFieldDesc("allhalted", "allhalted", reset=Some(0))), RegField.r(1, DMSTATUSRdData.anyrunning, RegFieldDesc("anyrunning", "anyrunning", reset=Some(1))), RegField.r(1, DMSTATUSRdData.allrunning, RegFieldDesc("allrunning", "allrunning", reset=Some(1))), RegField.r(1, DMSTATUSRdData.anyunavail, RegFieldDesc("anyunavail", "anyunavail", reset=Some(0))), RegField.r(1, DMSTATUSRdData.allunavail, RegFieldDesc("allunavail", "allunavail", reset=Some(0))), RegField.r(1, DMSTATUSRdData.anynonexistent, RegFieldDesc("anynonexistent", "anynonexistent", reset=Some(0))), RegField.r(1, DMSTATUSRdData.allnonexistent, RegFieldDesc("allnonexistent", "allnonexistent", reset=Some(0))), RegField.r(1, DMSTATUSRdData.anyresumeack, RegFieldDesc("anyresumeack", "anyresumeack", reset=Some(1))), RegField.r(1, DMSTATUSRdData.allresumeack, RegFieldDesc("allresumeack", "allresumeack", reset=Some(1))), RegField.r(1, DMSTATUSRdData.anyhavereset, RegFieldDesc("anyhavereset", "anyhavereset", reset=Some(0))), RegField.r(1, DMSTATUSRdData.allhavereset, RegFieldDesc("allhavereset", "allhavereset", reset=Some(0))), RegField(2), RegField.r(1, DMSTATUSRdData.impebreak, RegFieldDesc("impebreak", "impebreak", reset=Some(if (cfg.hasImplicitEbreak) 1 else 0))) )) val dmcs2RegFields = RegFieldGroup("dmi_dmcs2", Some("debug module control/status register 2"), Seq( WNotifyVal(1, DMCS2RdData.hgselect, DMCS2WrData.hgselect, hgselectWrEn, RegFieldDesc("hgselect", "select halt groups or external triggers", reset=Some(0), volatile=true)), WNotifyVal(1, 0.U, DMCS2WrData.hgwrite, hgwriteWrEn, RegFieldDesc("hgwrite", "write 1 to change halt groups", reset=None, access=RegFieldAccessType.W)), WNotifyVal(5, DMCS2RdData.haltgroup, DMCS2WrData.haltgroup, haltgroupWrEn, RegFieldDesc("haltgroup", "halt group", reset=Some(0), volatile=true)), if (nExtTriggers > 1) WNotifyVal(4, DMCS2RdData.exttrigger, DMCS2WrData.exttrigger, exttriggerWrEn, RegFieldDesc("exttrigger", "external trigger select", reset=Some(0), volatile=true)) else RegField(4) )) val abstractcsRegFields = RegFieldGroup("dmi_abstractcs", Some("abstract command control/status"), Seq( RegField.r(4, ABSTRACTCSRdData.datacount, RegFieldDesc("datacount", "number of DATA registers", reset=Some(cfg.nAbstractDataWords))), RegField(4), WNotifyVal(3, ABSTRACTCSRdData.cmderr, ABSTRACTCSWrData.cmderr, ABSTRACTCSWrEnMaybe, RegFieldDesc("cmderr", "command error", reset=Some(0), wrType=Some(RegFieldWrType.ONE_TO_CLEAR))), RegField(1), RegField.r(1, ABSTRACTCSRdData.busy, RegFieldDesc("busy", "busy", reset=Some(0))), RegField(11), RegField.r(5, ABSTRACTCSRdData.progbufsize, RegFieldDesc("progbufsize", "number of PROGBUF registers", reset=Some(cfg.nProgramBufferWords))) )) val (sbcsFields, sbAddrFields, sbDataFields): (Seq[RegField], Seq[Seq[RegField]], Seq[Seq[RegField]]) = sb2tlOpt.map{ sb2tl => SystemBusAccessModule(sb2tl, io.dmactive, dmAuthenticated)(p) }.getOrElse((Seq.empty[RegField], Seq.fill[Seq[RegField]](4)(Seq.empty[RegField]), 
Seq.fill[Seq[RegField]](4)(Seq.empty[RegField]))) //-------------------------------------------------------------- // Program Buffer Access (DMI ... System Bus can override) //-------------------------------------------------------------- val omRegMap = dmiNode.regmap( (DMI_DMSTATUS << 2) -> dmstatusRegFields, //TODO (DMI_CFGSTRADDR0 << 2) -> cfgStrAddrFields, (DMI_DMCS2 << 2) -> (if (nHaltGroups > 0) dmcs2RegFields else Nil), (DMI_HALTSUM0 << 2) -> RegFieldGroup("dmi_haltsum0", Some("Halt Summary 0"), Seq(RegField.r(32, HALTSUM0RdData.asUInt, RegFieldDesc("dmi_haltsum0", "halt summary 0")))), (DMI_HALTSUM1 << 2) -> RegFieldGroup("dmi_haltsum1", Some("Halt Summary 1"), Seq(RegField.r(32, HALTSUM1RdData.asUInt, RegFieldDesc("dmi_haltsum1", "halt summary 1")))), (DMI_ABSTRACTCS << 2) -> abstractcsRegFields, (DMI_ABSTRACTAUTO<< 2) -> RegFieldGroup("dmi_abstractauto", Some("abstract command autoexec"), Seq( WNotifyVal(cfg.nAbstractDataWords, ABSTRACTAUTORdData.autoexecdata, ABSTRACTAUTOWrData.autoexecdata, autoexecdataWrEnMaybe, RegFieldDesc("autoexecdata", "abstract command data autoexec", reset=Some(0))), RegField(16-cfg.nAbstractDataWords), WNotifyVal(cfg.nProgramBufferWords, ABSTRACTAUTORdData.autoexecprogbuf, ABSTRACTAUTOWrData.autoexecprogbuf, autoexecprogbufWrEnMaybe, RegFieldDesc("autoexecprogbuf", "abstract command progbuf autoexec", reset=Some(0))))), (DMI_COMMAND << 2) -> RegFieldGroup("dmi_command", Some("Abstract Command Register"), Seq(RWNotify(32, COMMANDRdData.asUInt, COMMANDWrDataVal, COMMANDRdEn, COMMANDWrEnMaybe, Some(RegFieldDesc("dmi_command", "abstract command register", reset=Some(0), volatile=true))))), (DMI_DATA0 << 2) -> RegFieldGroup("dmi_data", Some("abstract command data registers"), abstractDataMem.zipWithIndex.map{case (x, i) => RWNotify(8, Mux(dmAuthenticated, x, 0.U), abstractDataNxt(i), dmiAbstractDataRdEn(i), dmiAbstractDataWrEnMaybe(i), Some(RegFieldDesc(s"dmi_data_$i", s"abstract command data register $i", reset = Some(0), volatile=true)))}, false), (DMI_PROGBUF0 << 2) -> RegFieldGroup("dmi_progbuf", Some("abstract command progbuf registers"), programBufferMem.zipWithIndex.map{case (x, i) => RWNotify(8, Mux(dmAuthenticated, x, 0.U), programBufferNxt(i), dmiProgramBufferRdEn(i), dmiProgramBufferWrEnMaybe(i), Some(RegFieldDesc(s"dmi_progbuf_$i", s"abstract command progbuf register $i", reset = Some(0))))}, false), (DMI_AUTHDATA << 2) -> (if (cfg.hasAuthentication) RegFieldGroup("dmi_authdata", Some("authentication data exchange register"), Seq(RWNotify(32, io.auth.get.dmAuthRdata, io.auth.get.dmAuthWdata, authRdEnMaybe, authWrEnMaybe, Some(RegFieldDesc("authdata", "authentication data exchange", volatile=true))))) else Nil), (DMI_SBCS << 2) -> sbcsFields, (DMI_SBDATA0 << 2) -> sbDataFields(0), (DMI_SBDATA1 << 2) -> sbDataFields(1), (DMI_SBDATA2 << 2) -> sbDataFields(2), (DMI_SBDATA3 << 2) -> sbDataFields(3), (DMI_SBADDRESS0 << 2) -> sbAddrFields(0), (DMI_SBADDRESS1 << 2) -> sbAddrFields(1), (DMI_SBADDRESS2 << 2) -> sbAddrFields(2), (DMI_SBADDRESS3 << 2) -> sbAddrFields(3) ) // Abstract data mem is written by both the tile link interface and DMI... abstractDataMem.zipWithIndex.foreach { case (x, i) => when (dmAuthenticated && dmiAbstractDataWrEnMaybe(i) && dmiAbstractDataAccessLegal) { x := abstractDataNxt(i) } } // ... 
and also by custom register read (if implemented) val (customs, customParams) = customNode.in.unzip val needCustom = (customs.size > 0) && (customParams.head.addrs.size > 0) def getNeedCustom = () => needCustom if (needCustom) { val (custom, customP) = customNode.in.head require(customP.width % 8 == 0, s"Debug Custom width must be divisible by 8, not ${customP.width}") val custom_data = custom.data.asBools val custom_bytes = Seq.tabulate(customP.width/8){i => custom_data.slice(i*8, (i+1)*8).asUInt} when (custom.ready && custom.valid) { (abstractDataMem zip custom_bytes).zipWithIndex.foreach {case ((a, b), i) => a := b } } } programBufferMem.zipWithIndex.foreach { case (x, i) => when (dmAuthenticated && dmiProgramBufferWrEnMaybe(i) && dmiProgramBufferAccessLegal) { x := programBufferNxt(i) } } //-------------------------------------------------------------- // "Variable" ROM Generation //-------------------------------------------------------------- val goReg = Reg(Bool()) val goAbstract = WireInit(false.B) val goCustom = WireInit(false.B) val jalAbstract = WireInit(Instructions.JAL.value.U.asTypeOf(new GeneratedUJ())) jalAbstract.setImm(ABSTRACT(cfg) - WHERETO) when (~io.dmactive){ goReg := false.B }.otherwise { when (goAbstract) { goReg := true.B }.elsewhen (hartGoingWrEn){ assert(hartGoingId === 0.U, "Unexpected 'GOING' hart.")//Chisel3 #540 %x, expected %x", hartGoingId, 0.U) goReg := false.B } } class flagBundle extends Bundle { val reserved = UInt(6.W) val resume = Bool() val go = Bool() } val flags = WireInit(VecInit(Seq.fill(1 << selectedHartReg.getWidth) {0.U.asTypeOf(new flagBundle())} )) assert ((hartSelFuncs.hartSelToHartId(selectedHartReg) < flags.size.U), s"HartSel to HartId Mapping is illegal for this Debug Implementation, because HartID must be < ${flags.size} for it to work.") flags(hartSelFuncs.hartSelToHartId(selectedHartReg)).go := goReg for (component <- 0 until nComponents) { val componentSel = WireInit(component.U) flags(hartSelFuncs.hartSelToHartId(componentSel)).resume := resumeReqRegs(component) } //---------------------------- // Abstract Command Decoding & Generation //---------------------------- val accessRegisterCommandWr = WireInit(COMMANDWrData.asUInt.asTypeOf(new ACCESS_REGISTERFields())) /** real COMMAND*/ val accessRegisterCommandReg = WireInit(COMMANDReg.asUInt.asTypeOf(new ACCESS_REGISTERFields())) // TODO: Quick Access class GeneratedI extends Bundle { val imm = UInt(12.W) val rs1 = UInt(5.W) val funct3 = UInt(3.W) val rd = UInt(5.W) val opcode = UInt(7.W) } class GeneratedS extends Bundle { val immhi = UInt(7.W) val rs2 = UInt(5.W) val rs1 = UInt(5.W) val funct3 = UInt(3.W) val immlo = UInt(5.W) val opcode = UInt(7.W) } class GeneratedCSR extends Bundle { val imm = UInt(12.W) val rs1 = UInt(5.W) val funct3 = UInt(3.W) val rd = UInt(5.W) val opcode = UInt(7.W) } class GeneratedUJ extends Bundle { val imm3 = UInt(1.W) val imm0 = UInt(10.W) val imm1 = UInt(1.W) val imm2 = UInt(8.W) val rd = UInt(5.W) val opcode = UInt(7.W) def setImm(imm: Int) : Unit = { // TODO: Check bounds of imm. 
require(imm % 2 == 0, "Immediate must be even for UJ encoding.") val immWire = WireInit(imm.S(21.W)) val immBits = WireInit(VecInit(immWire.asBools)) imm0 := immBits.slice(1, 1 + 10).asUInt imm1 := immBits.slice(11, 11 + 11).asUInt imm2 := immBits.slice(12, 12 + 8).asUInt imm3 := immBits.slice(20, 20 + 1).asUInt } } require((cfg.atzero && cfg.nAbstractInstructions == 2) || (!cfg.atzero && cfg.nAbstractInstructions == 5), "Mismatch between DebugModuleParams atzero and nAbstractInstructions") val abstractGeneratedMem = Reg(Vec(cfg.nAbstractInstructions, (UInt(32.W)))) def abstractGeneratedI(cfg: DebugModuleParams): UInt = { val inst = Wire(new GeneratedI()) val offset = if (cfg.atzero) DATA else (DATA-0x800) & 0xFFF val base = if (cfg.atzero) 0.U else Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U) inst.opcode := (Instructions.LW.value.U.asTypeOf(new GeneratedI())).opcode inst.rd := (accessRegisterCommandReg.regno & 0x1F.U) inst.funct3 := accessRegisterCommandReg.size inst.rs1 := base inst.imm := offset.U inst.asUInt } def abstractGeneratedS(cfg: DebugModuleParams): UInt = { val inst = Wire(new GeneratedS()) val offset = if (cfg.atzero) DATA else (DATA-0x800) & 0xFFF val base = if (cfg.atzero) 0.U else Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U) inst.opcode := (Instructions.SW.value.U.asTypeOf(new GeneratedS())).opcode inst.immlo := (offset & 0x1F).U inst.funct3 := accessRegisterCommandReg.size inst.rs1 := base inst.rs2 := (accessRegisterCommandReg.regno & 0x1F.U) inst.immhi := (offset >> 5).U inst.asUInt } def abstractGeneratedCSR: UInt = { val inst = Wire(new GeneratedCSR()) val base = Mux(accessRegisterCommandReg.regno(0), 8.U, 9.U) // use s0 as base for odd regs, s1 as base for even regs inst := (Instructions.CSRRW.value.U.asTypeOf(new GeneratedCSR())) inst.imm := CSRs.dscratch1.U inst.rs1 := base inst.rd := base inst.asUInt } val nop = Wire(new GeneratedI()) nop := Instructions.ADDI.value.U.asTypeOf(new GeneratedI()) nop.rd := 0.U nop.rs1 := 0.U nop.imm := 0.U val isa = Wire(new GeneratedI()) isa := Instructions.ADDIW.value.U.asTypeOf(new GeneratedI()) isa.rd := 0.U isa.rs1 := 0.U isa.imm := 0.U when (goAbstract) { if (cfg.nAbstractInstructions == 2) { // ABSTRACT(0): Transfer: LW or SW, else NOP // ABSTRACT(1): Postexec: NOP else EBREAK abstractGeneratedMem(0) := Mux(accessRegisterCommandReg.transfer, Mux(accessRegisterCommandReg.write, abstractGeneratedI(cfg), abstractGeneratedS(cfg)), nop.asUInt ) abstractGeneratedMem(1) := Mux(accessRegisterCommandReg.postexec, nop.asUInt, Instructions.EBREAK.value.U) } else { // Entry: All regs in GPRs, dscratch1=offset 0x800 in DM // ABSTRACT(0): CheckISA: ADDW or NOP (exception here if size=3 and not RV64) // ABSTRACT(1): CSRRW s1,dscratch1,s1 or CSRRW s0,dscratch1,s0 // ABSTRACT(2): Transfer: LW, SW, LD, SD else NOP // ABSTRACT(3): CSRRW s1,dscratch1,s1 or CSRRW s0,dscratch1,s0 // ABSTRACT(4): Postexec: NOP else EBREAK abstractGeneratedMem(0) := Mux(accessRegisterCommandReg.transfer && accessRegisterCommandReg.size =/= 2.U, isa.asUInt, nop.asUInt) abstractGeneratedMem(1) := abstractGeneratedCSR abstractGeneratedMem(2) := Mux(accessRegisterCommandReg.transfer, Mux(accessRegisterCommandReg.write, abstractGeneratedI(cfg), abstractGeneratedS(cfg)), nop.asUInt ) abstractGeneratedMem(3) := abstractGeneratedCSR abstractGeneratedMem(4) := Mux(accessRegisterCommandReg.postexec, nop.asUInt, Instructions.EBREAK.value.U) } } //-------------------------------------------------------------- // Drive Custom Access 
//-------------------------------------------------------------- if (needCustom) { val (custom, customP) = customNode.in.head custom.addr := accessRegisterCommandReg.regno custom.valid := goCustom } //-------------------------------------------------------------- // Hart Bus Access //-------------------------------------------------------------- tlNode.regmap( // This memory is writable. HALTED -> Seq(WNotifyWire(sbIdWidth, hartHaltedId, hartHaltedWrEn, "debug_hart_halted", "Debug ROM Causes hart to write its hartID here when it is in Debug Mode.")), GOING -> Seq(WNotifyWire(sbIdWidth, hartGoingId, hartGoingWrEn, "debug_hart_going", "Debug ROM causes hart to write 0 here when it begins executing Debug Mode instructions.")), RESUMING -> Seq(WNotifyWire(sbIdWidth, hartResumingId, hartResumingWrEn, "debug_hart_resuming", "Debug ROM causes hart to write its hartID here when it leaves Debug Mode.")), EXCEPTION -> Seq(WNotifyWire(sbIdWidth, hartExceptionId, hartExceptionWrEn, "debug_hart_exception", "Debug ROM causes hart to write 0 here if it gets an exception in Debug Mode.")), DATA -> RegFieldGroup("debug_data", Some("Data used to communicate with Debug Module"), abstractDataMem.zipWithIndex.map {case (x, i) => RegField(8, x, RegFieldDesc(s"debug_data_$i", ""))}), PROGBUF(cfg)-> RegFieldGroup("debug_progbuf", Some("Program buffer used to communicate with Debug Module"), programBufferMem.zipWithIndex.map {case (x, i) => RegField(8, x, RegFieldDesc(s"debug_progbuf_$i", ""))}), // These sections are read-only. IMPEBREAK(cfg)-> {if (cfg.hasImplicitEbreak) Seq(RegField.r(32, Instructions.EBREAK.value.U, RegFieldDesc("debug_impebreak", "Debug Implicit EBREAK", reset=Some(Instructions.EBREAK.value)))) else Nil}, WHERETO -> Seq(RegField.r(32, jalAbstract.asUInt, RegFieldDesc("debug_whereto", "Instruction filled in by Debug Module to control hart in Debug Mode", volatile = true))), ABSTRACT(cfg) -> RegFieldGroup("debug_abstract", Some("Instructions generated by Debug Module"), abstractGeneratedMem.zipWithIndex.map{ case (x,i) => RegField.r(32, x, RegFieldDesc(s"debug_abstract_$i", "", volatile=true))}), FLAGS -> RegFieldGroup("debug_flags", Some("Memory region used to control hart going/resuming in Debug Mode"), if (nComponents == 1) { Seq.tabulate(1024) { i => RegField.r(8, flags(0).asUInt, RegFieldDesc(s"debug_flags_$i", "", volatile=true)) } } else { flags.zipWithIndex.map{case(x, i) => RegField.r(8, x.asUInt, RegFieldDesc(s"debug_flags_$i", "", volatile=true))} }), ROMBASE -> RegFieldGroup("debug_rom", Some("Debug ROM"), (if (cfg.atzero) DebugRomContents() else DebugRomNonzeroContents()).zipWithIndex.map{case (x, i) => RegField.r(8, (x & 0xFF).U(8.W), RegFieldDesc(s"debug_rom_$i", "", reset=Some(x)))}) ) // Override System Bus accesses with dmactive reset. when (~io.dmactive){ abstractDataMem.foreach {x => x := 0.U} programBufferMem.foreach {x => x := 0.U} } //-------------------------------------------------------------- // Abstract Command State Machine //-------------------------------------------------------------- object CtrlState extends scala.Enumeration { type CtrlState = Value val Waiting, CheckGenerate, Exec, Custom = Value def apply( t : Value) : UInt = { t.id.U(log2Up(values.size).W) } } import CtrlState._ // This is not an initialization! 
val ctrlStateReg = Reg(chiselTypeOf(CtrlState(Waiting))) val hartHalted = haltedBitRegs(if (nComponents == 1) 0.U(0.W) else selectedHartReg) val ctrlStateNxt = WireInit(ctrlStateReg) //------------------------ // DMI Register Control and Status abstractCommandBusy := (ctrlStateReg =/= CtrlState(Waiting)) ABSTRACTCSWrEnLegal := (ctrlStateReg === CtrlState(Waiting)) COMMANDWrEnLegal := (ctrlStateReg === CtrlState(Waiting)) ABSTRACTAUTOWrEnLegal := (ctrlStateReg === CtrlState(Waiting)) dmiAbstractDataAccessLegal := (ctrlStateReg === CtrlState(Waiting)) dmiProgramBufferAccessLegal := (ctrlStateReg === CtrlState(Waiting)) errorBusy := (ABSTRACTCSWrEnMaybe && ~ABSTRACTCSWrEnLegal) || (autoexecdataWrEnMaybe && ~ABSTRACTAUTOWrEnLegal) || (autoexecprogbufWrEnMaybe && ~ABSTRACTAUTOWrEnLegal) || (COMMANDWrEnMaybe && ~COMMANDWrEnLegal) || (dmiAbstractDataAccess && ~dmiAbstractDataAccessLegal) || (dmiProgramBufferAccess && ~dmiProgramBufferAccessLegal) // TODO: Maybe Quick Access val commandWrIsAccessRegister = (COMMANDWrData.cmdtype === DebugAbstractCommandType.AccessRegister.id.U) val commandRegIsAccessRegister = (COMMANDReg.cmdtype === DebugAbstractCommandType.AccessRegister.id.U) val commandWrIsUnsupported = COMMANDWrEn && !commandWrIsAccessRegister val commandRegIsUnsupported = WireInit(true.B) val commandRegBadHaltResume = WireInit(false.B) // We only support abstract commands for GPRs and any custom registers, if specified. val accessRegIsLegalSize = (accessRegisterCommandReg.size === 2.U) || (accessRegisterCommandReg.size === 3.U) val accessRegIsGPR = (accessRegisterCommandReg.regno >= 0x1000.U && accessRegisterCommandReg.regno <= 0x101F.U) && accessRegIsLegalSize val accessRegIsCustom = if (needCustom) { val (custom, customP) = customNode.in.head customP.addrs.foldLeft(false.B){ (result, current) => result || (current.U === accessRegisterCommandReg.regno)} } else false.B when (commandRegIsAccessRegister) { when (accessRegIsCustom && accessRegisterCommandReg.transfer && accessRegisterCommandReg.write === false.B) { commandRegIsUnsupported := false.B }.elsewhen (!accessRegisterCommandReg.transfer || accessRegIsGPR) { commandRegIsUnsupported := false.B commandRegBadHaltResume := ~hartHalted } } val wrAccessRegisterCommand = COMMANDWrEn && commandWrIsAccessRegister && (ABSTRACTCSReg.cmderr === 0.U) val regAccessRegisterCommand = autoexec && commandRegIsAccessRegister && (ABSTRACTCSReg.cmderr === 0.U) //------------------------ // Variable ROM STATE MACHINE // ----------------------- when (ctrlStateReg === CtrlState(Waiting)){ when (wrAccessRegisterCommand || regAccessRegisterCommand) { ctrlStateNxt := CtrlState(CheckGenerate) }.elsewhen (commandWrIsUnsupported) { // These checks are really on the command type. errorUnsupported := true.B }.elsewhen (autoexec && commandRegIsUnsupported) { errorUnsupported := true.B } }.elsewhen (ctrlStateReg === CtrlState(CheckGenerate)){ // We use this state to ensure that the COMMAND has been // registered by the time that we need to use it, to avoid // generating it directly from the COMMANDWrData. 
// This 'commandRegIsUnsupported' is really just checking the // AccessRegisterCommand parameters (regno) when (commandRegIsUnsupported) { errorUnsupported := true.B ctrlStateNxt := CtrlState(Waiting) }.elsewhen (commandRegBadHaltResume){ errorHaltResume := true.B ctrlStateNxt := CtrlState(Waiting) }.otherwise { when(accessRegIsCustom) { ctrlStateNxt := CtrlState(Custom) }.otherwise { ctrlStateNxt := CtrlState(Exec) goAbstract := true.B } } }.elsewhen (ctrlStateReg === CtrlState(Exec)) { // We can't just look at 'hartHalted' here, because // hartHaltedWrEn is overloaded to mean 'got an ebreak' // which may have happened when we were already halted. when(goReg === false.B && hartHaltedWrEn && (hartSelFuncs.hartIdToHartSel(hartHaltedId) === selectedHartReg)){ ctrlStateNxt := CtrlState(Waiting) } when(hartExceptionWrEn) { assert(hartExceptionId === 0.U, "Unexpected 'EXCEPTION' hart")//Chisel3 #540, %x, expected %x", hartExceptionId, 0.U) ctrlStateNxt := CtrlState(Waiting) errorException := true.B } }.elsewhen (ctrlStateReg === CtrlState(Custom)) { assert(needCustom.B, "Should not be in custom state unless we need it.") goCustom := true.B val (custom, customP) = customNode.in.head when (custom.ready && custom.valid) { ctrlStateNxt := CtrlState(Waiting) } } when (~io.dmactive || ~dmAuthenticated) { ctrlStateReg := CtrlState(Waiting) }.otherwise { ctrlStateReg := ctrlStateNxt } assert ((!io.dmactive || !hartExceptionWrEn || ctrlStateReg === CtrlState(Exec)), "Unexpected EXCEPTION write: should only get it in Debug Module EXEC state") } } // Wrapper around TL Debug Module Inner and an Async DMI Sink interface. // Handles the synchronization of dmactive, which is used as a synchronous reset // inside the Inner block. // Also is the Sink side of hartsel & resumereq fields of DMCONTROL. class TLDebugModuleInnerAsync(device: Device, getNComponents: () => Int, beatBytes: Int)(implicit p: Parameters) extends LazyModule{ val cfg = p(DebugModuleKey).get val dmInner = LazyModule(new TLDebugModuleInner(device, getNComponents, beatBytes)) val dmiXing = LazyModule(new TLAsyncCrossingSink(AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset))) val dmiNode = dmiXing.node val tlNode = dmInner.tlNode dmInner.dmiNode := dmiXing.node // Require that there are no registers in TL interface, so that spurious // processor accesses to the DM don't need to enable the clock. We don't // require this property of the SBA, because the debugger is responsible for // raising dmactive (hence enabling the clock) during these transactions. require(dmInner.tlNode.concurrency == 0) lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { // Clock/reset domains: // debug_clock / debug_reset = Debug inner domain // tl_clock / tl_reset = tilelink domain (External: clock / reset) // val io = IO(new Bundle { val debug_clock = Input(Clock()) val debug_reset = Input(Reset()) val tl_clock = Input(Clock()) val tl_reset = Input(Reset()) // These are all asynchronous and come from Outer /** reset signal for DM */ val dmactive = Input(Bool()) /** conrol signals for Inner * * generated in Outer */ val innerCtrl = Flipped(new AsyncBundle(new DebugInternalBundle(getNComponents()), AsyncQueueParams.singleton(safe=cfg.crossingHasSafeReset))) // This comes from tlClk domain. 
/** debug available status */ val debugUnavail = Input(Vec(getNComponents(), Bool())) /** debug interruption*/ val hgDebugInt = Output(Vec(getNComponents(), Bool())) val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO()) /** vector to indicate which hart is in reset * * dm receives it from core and sends it to Inner */ val hartIsInReset = Input(Vec(getNComponents(), Bool())) /** Debug Authentication signals from core */ val auth = p(DebugModuleKey).get.hasAuthentication.option(new DebugAuthenticationIO()) }) val rf_reset = IO(Input(Reset())) // RF transform childClock := io.debug_clock childReset := io.debug_reset override def provideImplicitClockToLazyChildren = true val dmactive_synced = withClockAndReset(childClock, childReset) { val dmactive_synced = AsyncResetSynchronizerShiftReg(in=io.dmactive, sync=3, name=Some("dmactiveSync")) dmInner.module.clock := io.debug_clock dmInner.module.reset := io.debug_reset dmInner.module.io.tl_clock := io.tl_clock dmInner.module.io.tl_reset := io.tl_reset dmInner.module.io.dmactive := dmactive_synced dmInner.module.io.innerCtrl <> FromAsyncBundle(io.innerCtrl) dmInner.module.io.debugUnavail := io.debugUnavail io.hgDebugInt := dmInner.module.io.hgDebugInt io.extTrigger.foreach { x => dmInner.module.io.extTrigger.foreach {y => x <> y}} dmInner.module.io.hartIsInReset := io.hartIsInReset io.auth.foreach { x => dmInner.module.io.auth.foreach {y => x <> y}} dmactive_synced } } } /** Create a version of the TLDebugModule which includes a synchronization interface * internally for the DMI. This is no longer optional outside of this module * because the Clock must run when tl_clock isn't running or tl_reset is asserted. */ class TLDebugModule(beatBytes: Int)(implicit p: Parameters) extends LazyModule { val device = new SimpleDevice("debug-controller", Seq("sifive,debug-013","riscv,debug-013")){ override val alwaysExtended = true override def describe(resources: ResourceBindings): Description = { val Description(name, mapping) = super.describe(resources) val attach = Map( "debug-attach" -> ( (if (p(ExportDebug).apb) Seq(ResourceString("apb")) else Seq()) ++ (if (p(ExportDebug).jtag) Seq(ResourceString("jtag")) else Seq()) ++ (if (p(ExportDebug).cjtag) Seq(ResourceString("cjtag")) else Seq()) ++ (if (p(ExportDebug).dmi) Seq(ResourceString("dmi")) else Seq()))) Description(name, mapping ++ attach) } } val dmOuter : TLDebugModuleOuterAsync = LazyModule(new TLDebugModuleOuterAsync(device)(p)) val dmInner : TLDebugModuleInnerAsync = LazyModule(new TLDebugModuleInnerAsync(device, () => {dmOuter.dmOuter.intnode.edges.out.size}, beatBytes)(p)) val node = dmInner.tlNode val intnode = dmOuter.intnode val apbNodeOpt = dmOuter.apbNodeOpt dmInner.dmiNode := dmOuter.dmiInnerNode lazy val module = new Impl class Impl extends LazyRawModuleImp(this) { val nComponents = dmOuter.dmOuter.intnode.edges.out.size // Clock/reset domains: // tl_clock / tl_reset = tilelink domain // debug_clock / debug_reset = Inner debug (synchronous to tl_clock) // apb_clock / apb_reset = Outer debug with APB // dmiClock / dmiReset = Outer debug without APB // val io = IO(new Bundle { val debug_clock = Input(Clock()) val debug_reset = Input(Reset()) val tl_clock = Input(Clock()) val tl_reset = Input(Reset()) /** Debug control signals generated in Outer */ val ctrl = new DebugCtrlBundle(nComponents) /** Debug Module Interface bewteen DM and DTM * * The DTM provides access to one or more Debug Modules (DMs) using DMI */ val dmi = 
(!p(ExportDebug).apb).option(Flipped(new ClockedDMIIO())) val apb_clock = p(ExportDebug).apb.option(Input(Clock())) val apb_reset = p(ExportDebug).apb.option(Input(Reset())) val extTrigger = (p(DebugModuleKey).get.nExtTriggers > 0).option(new DebugExtTriggerIO()) /** vector to indicate which hart is in reset * * dm receives it from core and sends it to Inner */ val hartIsInReset = Input(Vec(nComponents, Bool())) /** hart reset request generated by hartreset-logic in Outer */ val hartResetReq = p(DebugModuleKey).get.hasHartResets.option(Output(Vec(nComponents, Bool()))) /** Debug Authentication signals from core */ val auth = p(DebugModuleKey).get.hasAuthentication.option(new DebugAuthenticationIO()) }) childClock := io.tl_clock childReset := io.tl_reset override def provideImplicitClockToLazyChildren = true dmOuter.module.io.dmi.foreach { dmOuterDMI => dmOuterDMI <> io.dmi.get.dmi dmOuter.module.io.dmi_reset := io.dmi.get.dmiReset dmOuter.module.io.dmi_clock := io.dmi.get.dmiClock dmOuter.module.rf_reset := io.dmi.get.dmiReset } (io.apb_clock zip io.apb_reset) foreach { case (c, r) => dmOuter.module.io.dmi_reset := r dmOuter.module.io.dmi_clock := c dmOuter.module.rf_reset := r } dmInner.module.rf_reset := io.debug_reset dmInner.module.io.debug_clock := io.debug_clock dmInner.module.io.debug_reset := io.debug_reset dmInner.module.io.tl_clock := io.tl_clock dmInner.module.io.tl_reset := io.tl_reset dmInner.module.io.innerCtrl <> dmOuter.module.io.innerCtrl dmInner.module.io.dmactive := dmOuter.module.io.ctrl.dmactive dmInner.module.io.debugUnavail := io.ctrl.debugUnavail dmOuter.module.io.hgDebugInt := dmInner.module.io.hgDebugInt io.ctrl <> dmOuter.module.io.ctrl io.extTrigger.foreach { x => dmInner.module.io.extTrigger.foreach {y => x <> y}} dmInner.module.io.hartIsInReset := io.hartIsInReset io.hartResetReq.foreach { x => dmOuter.module.io.hartResetReq.foreach {y => x := y}} io.auth.foreach { x => dmOuter.module.io.dmAuthenticated.get := x.dmAuthenticated } io.auth.foreach { x => dmInner.module.io.auth.foreach {y => x <> y}} } }
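// Illustrative, standalone sketch (not part of the design above): GeneratedUJ.setImm
// scatters a 21-bit, even JAL offset into the RISC-V J-type fields
// imm[20 | 10:1 | 11 | 19:12]. The pure-Scala object below, using made-up example
// offsets, cross-checks that packing; the object and helper names are hypothetical
// and exist only to illustrate the field layout.
object UJImmediateSketch extends App {
  /** Pack an even offset into (imm3, imm0, imm1, imm2) as laid out in GeneratedUJ. */
  def encode(offset: Int): (Int, Int, Int, Int) = {
    require(offset % 2 == 0, "Immediate must be even for UJ encoding.")
    val imm0 = (offset >> 1)  & 0x3FF // imm[10:1]
    val imm1 = (offset >> 11) & 0x1   // imm[11]
    val imm2 = (offset >> 12) & 0xFF  // imm[19:12]
    val imm3 = (offset >> 20) & 0x1   // imm[20]
    (imm3, imm0, imm1, imm2)
  }
  /** Rebuild the offset from the scattered fields (round-trip check). */
  def decode(imm3: Int, imm0: Int, imm1: Int, imm2: Int): Int =
    (imm3 << 20) | (imm2 << 12) | (imm1 << 11) | (imm0 << 1)

  for (offset <- Seq(0x500, 0x7FE, 0x1000)) { // example offsets only
    val (i3, i0, i1, i2) = encode(offset)
    assert(decode(i3, i0, i1, i2) == offset)
    println(f"offset=0x$offset%x -> imm3=$i3 imm0=0x$i0%x imm1=$i1 imm2=0x$i2%x")
  }
}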
module TLDebugModuleInnerAsync( // @[Debug.scala:1871:9]
  input [2:0] auto_dmiXing_in_a_mem_0_opcode, // @[LazyModuleImp.scala:107:25]
  input [8:0] auto_dmiXing_in_a_mem_0_address, // @[LazyModuleImp.scala:107:25]
  input [31:0] auto_dmiXing_in_a_mem_0_data, // @[LazyModuleImp.scala:107:25]
  output auto_dmiXing_in_a_ridx, // @[LazyModuleImp.scala:107:25]
  input auto_dmiXing_in_a_widx, // @[LazyModuleImp.scala:107:25]
  output auto_dmiXing_in_a_safe_ridx_valid, // @[LazyModuleImp.scala:107:25]
  input auto_dmiXing_in_a_safe_widx_valid, // @[LazyModuleImp.scala:107:25]
  input auto_dmiXing_in_a_safe_source_reset_n, // @[LazyModuleImp.scala:107:25]
  output auto_dmiXing_in_a_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25]
  output [2:0] auto_dmiXing_in_d_mem_0_opcode, // @[LazyModuleImp.scala:107:25]
  output [1:0] auto_dmiXing_in_d_mem_0_size, // @[LazyModuleImp.scala:107:25]
  output auto_dmiXing_in_d_mem_0_source, // @[LazyModuleImp.scala:107:25]
  output [31:0] auto_dmiXing_in_d_mem_0_data, // @[LazyModuleImp.scala:107:25]
  input auto_dmiXing_in_d_ridx, // @[LazyModuleImp.scala:107:25]
  output auto_dmiXing_in_d_widx, // @[LazyModuleImp.scala:107:25]
  input auto_dmiXing_in_d_safe_ridx_valid, // @[LazyModuleImp.scala:107:25]
  output auto_dmiXing_in_d_safe_widx_valid, // @[LazyModuleImp.scala:107:25]
  output auto_dmiXing_in_d_safe_source_reset_n, // @[LazyModuleImp.scala:107:25]
  input auto_dmiXing_in_d_safe_sink_reset_n, // @[LazyModuleImp.scala:107:25]
  input auto_dmInner_sb2tlOpt_out_a_ready, // @[LazyModuleImp.scala:107:25]
  output auto_dmInner_sb2tlOpt_out_a_valid, // @[LazyModuleImp.scala:107:25]
  output [2:0] auto_dmInner_sb2tlOpt_out_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
  output [3:0] auto_dmInner_sb2tlOpt_out_a_bits_size, // @[LazyModuleImp.scala:107:25]
  output [31:0] auto_dmInner_sb2tlOpt_out_a_bits_address, // @[LazyModuleImp.scala:107:25]
  output [7:0] auto_dmInner_sb2tlOpt_out_a_bits_data, // @[LazyModuleImp.scala:107:25]
  output auto_dmInner_sb2tlOpt_out_d_ready, // @[LazyModuleImp.scala:107:25]
  input auto_dmInner_sb2tlOpt_out_d_valid, // @[LazyModuleImp.scala:107:25]
  input [2:0] auto_dmInner_sb2tlOpt_out_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
  input [1:0] auto_dmInner_sb2tlOpt_out_d_bits_param, // @[LazyModuleImp.scala:107:25]
  input [3:0] auto_dmInner_sb2tlOpt_out_d_bits_size, // @[LazyModuleImp.scala:107:25]
  input [4:0] auto_dmInner_sb2tlOpt_out_d_bits_sink, // @[LazyModuleImp.scala:107:25]
  input auto_dmInner_sb2tlOpt_out_d_bits_denied, // @[LazyModuleImp.scala:107:25]
  input [7:0] auto_dmInner_sb2tlOpt_out_d_bits_data, // @[LazyModuleImp.scala:107:25]
  input auto_dmInner_sb2tlOpt_out_d_bits_corrupt, // @[LazyModuleImp.scala:107:25]
  output auto_dmInner_tl_in_a_ready, // @[LazyModuleImp.scala:107:25]
  input auto_dmInner_tl_in_a_valid, // @[LazyModuleImp.scala:107:25]
  input [2:0] auto_dmInner_tl_in_a_bits_opcode, // @[LazyModuleImp.scala:107:25]
  input [2:0] auto_dmInner_tl_in_a_bits_param, // @[LazyModuleImp.scala:107:25]
  input [1:0] auto_dmInner_tl_in_a_bits_size, // @[LazyModuleImp.scala:107:25]
  input [10:0] auto_dmInner_tl_in_a_bits_source, // @[LazyModuleImp.scala:107:25]
  input [11:0] auto_dmInner_tl_in_a_bits_address, // @[LazyModuleImp.scala:107:25]
  input [7:0] auto_dmInner_tl_in_a_bits_mask, // @[LazyModuleImp.scala:107:25]
  input [63:0] auto_dmInner_tl_in_a_bits_data, // @[LazyModuleImp.scala:107:25]
  input auto_dmInner_tl_in_a_bits_corrupt, // @[LazyModuleImp.scala:107:25]
  input auto_dmInner_tl_in_d_ready, // @[LazyModuleImp.scala:107:25]
  output auto_dmInner_tl_in_d_valid, // @[LazyModuleImp.scala:107:25]
  output [2:0] auto_dmInner_tl_in_d_bits_opcode, // @[LazyModuleImp.scala:107:25]
  output [1:0] auto_dmInner_tl_in_d_bits_size, // @[LazyModuleImp.scala:107:25]
  output [10:0] auto_dmInner_tl_in_d_bits_source, // @[LazyModuleImp.scala:107:25]
  output [63:0] auto_dmInner_tl_in_d_bits_data, // @[LazyModuleImp.scala:107:25]
  input io_debug_clock, // @[Debug.scala:1877:16]
  input io_debug_reset, // @[Debug.scala:1877:16]
  input io_tl_clock, // @[Debug.scala:1877:16]
  input io_tl_reset, // @[Debug.scala:1877:16]
  input io_dmactive, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_resumereq, // @[Debug.scala:1877:16]
  input [9:0] io_innerCtrl_mem_0_hartsel, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_ackhavereset, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hasel, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hamask_0, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hamask_1, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hamask_2, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hamask_3, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hrmask_0, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hrmask_1, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hrmask_2, // @[Debug.scala:1877:16]
  input io_innerCtrl_mem_0_hrmask_3, // @[Debug.scala:1877:16]
  output io_innerCtrl_ridx, // @[Debug.scala:1877:16]
  input io_innerCtrl_widx, // @[Debug.scala:1877:16]
  output io_innerCtrl_safe_ridx_valid, // @[Debug.scala:1877:16]
  input io_innerCtrl_safe_widx_valid, // @[Debug.scala:1877:16]
  input io_innerCtrl_safe_source_reset_n, // @[Debug.scala:1877:16]
  output io_innerCtrl_safe_sink_reset_n, // @[Debug.scala:1877:16]
  output io_hgDebugInt_0, // @[Debug.scala:1877:16]
  output io_hgDebugInt_1, // @[Debug.scala:1877:16]
  output io_hgDebugInt_2, // @[Debug.scala:1877:16]
  output io_hgDebugInt_3, // @[Debug.scala:1877:16]
  input io_hartIsInReset_0, // @[Debug.scala:1877:16]
  input io_hartIsInReset_1, // @[Debug.scala:1877:16]
  input io_hartIsInReset_2, // @[Debug.scala:1877:16]
  input io_hartIsInReset_3 // @[Debug.scala:1877:16]
);
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq; // @[AsyncQueue.scala:211:22]
  wire [9:0] _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hasel; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_0; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_1; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_2; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_3; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_1; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_2; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_3; // @[AsyncQueue.scala:211:22]
  wire _dmactive_synced_dmactive_synced_dmactiveSync_io_q; // @[ShiftReg.scala:45:23]
  wire _dmiXing_auto_out_a_valid; // @[Debug.scala:1858:27]
  wire [2:0] _dmiXing_auto_out_a_bits_opcode; // @[Debug.scala:1858:27]
  wire [2:0] _dmiXing_auto_out_a_bits_param; // @[Debug.scala:1858:27]
  wire [1:0] _dmiXing_auto_out_a_bits_size; // @[Debug.scala:1858:27]
  wire _dmiXing_auto_out_a_bits_source; // @[Debug.scala:1858:27]
  wire [8:0] _dmiXing_auto_out_a_bits_address; // @[Debug.scala:1858:27]
  wire [3:0] _dmiXing_auto_out_a_bits_mask; // @[Debug.scala:1858:27]
  wire [31:0] _dmiXing_auto_out_a_bits_data; // @[Debug.scala:1858:27]
  wire _dmiXing_auto_out_a_bits_corrupt; // @[Debug.scala:1858:27]
  wire _dmiXing_auto_out_d_ready; // @[Debug.scala:1858:27]
  wire _dmInner_auto_dmi_in_a_ready; // @[Debug.scala:1857:27]
  wire _dmInner_auto_dmi_in_d_valid; // @[Debug.scala:1857:27]
  wire [2:0] _dmInner_auto_dmi_in_d_bits_opcode; // @[Debug.scala:1857:27]
  wire [1:0] _dmInner_auto_dmi_in_d_bits_size; // @[Debug.scala:1857:27]
  wire _dmInner_auto_dmi_in_d_bits_source; // @[Debug.scala:1857:27]
  wire [31:0] _dmInner_auto_dmi_in_d_bits_data; // @[Debug.scala:1857:27]
  TLDebugModuleInner dmInner ( // @[Debug.scala:1857:27]
    .clock (io_debug_clock),
    .reset (io_debug_reset),
    .auto_sb2tlOpt_out_a_ready (auto_dmInner_sb2tlOpt_out_a_ready),
    .auto_sb2tlOpt_out_a_valid (auto_dmInner_sb2tlOpt_out_a_valid),
    .auto_sb2tlOpt_out_a_bits_opcode (auto_dmInner_sb2tlOpt_out_a_bits_opcode),
    .auto_sb2tlOpt_out_a_bits_size (auto_dmInner_sb2tlOpt_out_a_bits_size),
    .auto_sb2tlOpt_out_a_bits_address (auto_dmInner_sb2tlOpt_out_a_bits_address),
    .auto_sb2tlOpt_out_a_bits_data (auto_dmInner_sb2tlOpt_out_a_bits_data),
    .auto_sb2tlOpt_out_d_ready (auto_dmInner_sb2tlOpt_out_d_ready),
    .auto_sb2tlOpt_out_d_valid (auto_dmInner_sb2tlOpt_out_d_valid),
    .auto_sb2tlOpt_out_d_bits_opcode (auto_dmInner_sb2tlOpt_out_d_bits_opcode),
    .auto_sb2tlOpt_out_d_bits_param (auto_dmInner_sb2tlOpt_out_d_bits_param),
    .auto_sb2tlOpt_out_d_bits_size (auto_dmInner_sb2tlOpt_out_d_bits_size),
    .auto_sb2tlOpt_out_d_bits_sink (auto_dmInner_sb2tlOpt_out_d_bits_sink),
    .auto_sb2tlOpt_out_d_bits_denied (auto_dmInner_sb2tlOpt_out_d_bits_denied),
    .auto_sb2tlOpt_out_d_bits_data (auto_dmInner_sb2tlOpt_out_d_bits_data),
    .auto_sb2tlOpt_out_d_bits_corrupt (auto_dmInner_sb2tlOpt_out_d_bits_corrupt),
    .auto_tl_in_a_ready (auto_dmInner_tl_in_a_ready),
    .auto_tl_in_a_valid (auto_dmInner_tl_in_a_valid),
    .auto_tl_in_a_bits_opcode (auto_dmInner_tl_in_a_bits_opcode),
    .auto_tl_in_a_bits_param (auto_dmInner_tl_in_a_bits_param),
    .auto_tl_in_a_bits_size (auto_dmInner_tl_in_a_bits_size),
    .auto_tl_in_a_bits_source (auto_dmInner_tl_in_a_bits_source),
    .auto_tl_in_a_bits_address (auto_dmInner_tl_in_a_bits_address),
    .auto_tl_in_a_bits_mask (auto_dmInner_tl_in_a_bits_mask),
    .auto_tl_in_a_bits_data (auto_dmInner_tl_in_a_bits_data),
    .auto_tl_in_a_bits_corrupt (auto_dmInner_tl_in_a_bits_corrupt),
    .auto_tl_in_d_ready (auto_dmInner_tl_in_d_ready),
    .auto_tl_in_d_valid (auto_dmInner_tl_in_d_valid),
    .auto_tl_in_d_bits_opcode (auto_dmInner_tl_in_d_bits_opcode),
    .auto_tl_in_d_bits_size (auto_dmInner_tl_in_d_bits_size),
    .auto_tl_in_d_bits_source (auto_dmInner_tl_in_d_bits_source),
    .auto_tl_in_d_bits_data (auto_dmInner_tl_in_d_bits_data),
    .auto_dmi_in_a_ready (_dmInner_auto_dmi_in_a_ready),
    .auto_dmi_in_a_valid (_dmiXing_auto_out_a_valid), // @[Debug.scala:1858:27]
    .auto_dmi_in_a_bits_opcode (_dmiXing_auto_out_a_bits_opcode), // @[Debug.scala:1858:27]
    .auto_dmi_in_a_bits_param (_dmiXing_auto_out_a_bits_param), // @[Debug.scala:1858:27]
    .auto_dmi_in_a_bits_size (_dmiXing_auto_out_a_bits_size), // @[Debug.scala:1858:27]
    .auto_dmi_in_a_bits_source (_dmiXing_auto_out_a_bits_source), // @[Debug.scala:1858:27]
    .auto_dmi_in_a_bits_address (_dmiXing_auto_out_a_bits_address), // @[Debug.scala:1858:27]
    .auto_dmi_in_a_bits_mask (_dmiXing_auto_out_a_bits_mask), // @[Debug.scala:1858:27]
    .auto_dmi_in_a_bits_data (_dmiXing_auto_out_a_bits_data), // @[Debug.scala:1858:27]
    .auto_dmi_in_a_bits_corrupt (_dmiXing_auto_out_a_bits_corrupt), // @[Debug.scala:1858:27]
    .auto_dmi_in_d_ready (_dmiXing_auto_out_d_ready), // @[Debug.scala:1858:27]
    .auto_dmi_in_d_valid (_dmInner_auto_dmi_in_d_valid),
    .auto_dmi_in_d_bits_opcode (_dmInner_auto_dmi_in_d_bits_opcode),
    .auto_dmi_in_d_bits_size (_dmInner_auto_dmi_in_d_bits_size),
    .auto_dmi_in_d_bits_source (_dmInner_auto_dmi_in_d_bits_source),
    .auto_dmi_in_d_bits_data (_dmInner_auto_dmi_in_d_bits_data),
    .io_dmactive (_dmactive_synced_dmactive_synced_dmactiveSync_io_q), // @[ShiftReg.scala:45:23]
    .io_innerCtrl_valid (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_resumereq (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hartsel (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_ackhavereset (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hasel (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hasel), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hamask_0 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_0), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hamask_1 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_1), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hamask_2 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_2), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hamask_3 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_3), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hrmask_0 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hrmask_1 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_1), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hrmask_2 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_2), // @[AsyncQueue.scala:211:22]
    .io_innerCtrl_bits_hrmask_3 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_3), // @[AsyncQueue.scala:211:22]
    .io_hgDebugInt_0 (io_hgDebugInt_0),
    .io_hgDebugInt_1 (io_hgDebugInt_1),
    .io_hgDebugInt_2 (io_hgDebugInt_2),
    .io_hgDebugInt_3 (io_hgDebugInt_3),
    .io_hartIsInReset_0 (io_hartIsInReset_0),
    .io_hartIsInReset_1 (io_hartIsInReset_1),
    .io_hartIsInReset_2 (io_hartIsInReset_2),
    .io_hartIsInReset_3 (io_hartIsInReset_3),
    .io_tl_clock (io_tl_clock),
    .io_tl_reset (io_tl_reset)
  ); // @[Debug.scala:1857:27]
  TLAsyncCrossingSink_a9d32s1k1z2u dmiXing ( // @[Debug.scala:1858:27]
    .clock (io_debug_clock),
    .reset (io_debug_reset),
    .auto_in_a_mem_0_opcode (auto_dmiXing_in_a_mem_0_opcode),
    .auto_in_a_mem_0_address (auto_dmiXing_in_a_mem_0_address),
    .auto_in_a_mem_0_data (auto_dmiXing_in_a_mem_0_data),
    .auto_in_a_ridx (auto_dmiXing_in_a_ridx),
    .auto_in_a_widx (auto_dmiXing_in_a_widx),
    .auto_in_a_safe_ridx_valid (auto_dmiXing_in_a_safe_ridx_valid),
    .auto_in_a_safe_widx_valid (auto_dmiXing_in_a_safe_widx_valid),
    .auto_in_a_safe_source_reset_n (auto_dmiXing_in_a_safe_source_reset_n),
    .auto_in_a_safe_sink_reset_n (auto_dmiXing_in_a_safe_sink_reset_n),
    .auto_in_d_mem_0_opcode (auto_dmiXing_in_d_mem_0_opcode),
    .auto_in_d_mem_0_size (auto_dmiXing_in_d_mem_0_size),
    .auto_in_d_mem_0_source (auto_dmiXing_in_d_mem_0_source),
    .auto_in_d_mem_0_data (auto_dmiXing_in_d_mem_0_data),
    .auto_in_d_ridx (auto_dmiXing_in_d_ridx),
    .auto_in_d_widx (auto_dmiXing_in_d_widx),
    .auto_in_d_safe_ridx_valid (auto_dmiXing_in_d_safe_ridx_valid),
    .auto_in_d_safe_widx_valid (auto_dmiXing_in_d_safe_widx_valid),
    .auto_in_d_safe_source_reset_n (auto_dmiXing_in_d_safe_source_reset_n),
    .auto_in_d_safe_sink_reset_n (auto_dmiXing_in_d_safe_sink_reset_n),
    .auto_out_a_ready (_dmInner_auto_dmi_in_a_ready), // @[Debug.scala:1857:27]
    .auto_out_a_valid (_dmiXing_auto_out_a_valid),
    .auto_out_a_bits_opcode (_dmiXing_auto_out_a_bits_opcode),
    .auto_out_a_bits_param (_dmiXing_auto_out_a_bits_param),
    .auto_out_a_bits_size (_dmiXing_auto_out_a_bits_size),
    .auto_out_a_bits_source (_dmiXing_auto_out_a_bits_source),
    .auto_out_a_bits_address (_dmiXing_auto_out_a_bits_address),
    .auto_out_a_bits_mask (_dmiXing_auto_out_a_bits_mask),
    .auto_out_a_bits_data (_dmiXing_auto_out_a_bits_data),
    .auto_out_a_bits_corrupt (_dmiXing_auto_out_a_bits_corrupt),
    .auto_out_d_ready (_dmiXing_auto_out_d_ready),
    .auto_out_d_valid (_dmInner_auto_dmi_in_d_valid), // @[Debug.scala:1857:27]
    .auto_out_d_bits_opcode (_dmInner_auto_dmi_in_d_bits_opcode), // @[Debug.scala:1857:27]
    .auto_out_d_bits_size (_dmInner_auto_dmi_in_d_bits_size), // @[Debug.scala:1857:27]
    .auto_out_d_bits_source (_dmInner_auto_dmi_in_d_bits_source), // @[Debug.scala:1857:27]
    .auto_out_d_bits_data (_dmInner_auto_dmi_in_d_bits_data) // @[Debug.scala:1857:27]
  ); // @[Debug.scala:1858:27]
  AsyncResetSynchronizerShiftReg_w1_d3_i0 dmactive_synced_dmactive_synced_dmactiveSync ( // @[ShiftReg.scala:45:23]
    .clock (io_debug_clock),
    .reset (io_debug_reset),
    .io_d (io_dmactive),
    .io_q (_dmactive_synced_dmactive_synced_dmactiveSync_io_q)
  ); // @[ShiftReg.scala:45:23]
  AsyncQueueSink_DebugInternalBundle dmactive_synced_dmInner_io_innerCtrl_sink ( // @[AsyncQueue.scala:211:22]
    .clock (io_debug_clock),
    .reset (io_debug_reset),
    .io_deq_valid (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_valid),
    .io_deq_bits_resumereq (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_resumereq),
    .io_deq_bits_hartsel (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hartsel),
    .io_deq_bits_ackhavereset (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_ackhavereset),
    .io_deq_bits_hasel (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hasel),
    .io_deq_bits_hamask_0 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_0),
    .io_deq_bits_hamask_1 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_1),
    .io_deq_bits_hamask_2 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_2),
    .io_deq_bits_hamask_3 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hamask_3),
    .io_deq_bits_hrmask_0 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_0),
    .io_deq_bits_hrmask_1 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_1),
    .io_deq_bits_hrmask_2 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_2),
    .io_deq_bits_hrmask_3 (_dmactive_synced_dmInner_io_innerCtrl_sink_io_deq_bits_hrmask_3),
    .io_async_mem_0_resumereq (io_innerCtrl_mem_0_resumereq),
    .io_async_mem_0_hartsel (io_innerCtrl_mem_0_hartsel),
    .io_async_mem_0_ackhavereset (io_innerCtrl_mem_0_ackhavereset),
    .io_async_mem_0_hasel (io_innerCtrl_mem_0_hasel),
    .io_async_mem_0_hamask_0 (io_innerCtrl_mem_0_hamask_0),
    .io_async_mem_0_hamask_1 (io_innerCtrl_mem_0_hamask_1),
    .io_async_mem_0_hamask_2 (io_innerCtrl_mem_0_hamask_2),
    .io_async_mem_0_hamask_3 (io_innerCtrl_mem_0_hamask_3),
    .io_async_mem_0_hrmask_0 (io_innerCtrl_mem_0_hrmask_0),
    .io_async_mem_0_hrmask_1 (io_innerCtrl_mem_0_hrmask_1),
    .io_async_mem_0_hrmask_2 (io_innerCtrl_mem_0_hrmask_2),
    .io_async_mem_0_hrmask_3 (io_innerCtrl_mem_0_hrmask_3),
    .io_async_ridx (io_innerCtrl_ridx),
    .io_async_widx (io_innerCtrl_widx),
    .io_async_safe_ridx_valid (io_innerCtrl_safe_ridx_valid),
    .io_async_safe_widx_valid (io_innerCtrl_safe_widx_valid),
    .io_async_safe_source_reset_n (io_innerCtrl_safe_source_reset_n),
    .io_async_safe_sink_reset_n (io_innerCtrl_safe_sink_reset_n)
  ); // @[AsyncQueue.scala:211:22]
endmodule